]>
Commit | Line | Data |
---|---|---|
d155a8f0 JB |
1 | --- qemu-7.2.10/subprojects/libvfio-user/lib/dma.c.orig 2024-03-04 17:26:53.000000000 +0100 |
2 | +++ qemu-7.2.10/subprojects/libvfio-user/lib/dma.c 2024-04-03 21:26:55.422317782 +0200 | |
3 | @@ -249,7 +249,7 @@ dma_map_region(dma_controller_t *dma, dm | |
4 | region->info.vaddr = mmap_base + (region->offset - offset); | |
5 | ||
6 | vfu_log(dma->vfu_ctx, LOG_DEBUG, "mapped DMA region iova=[%p, %p) " | |
7 | - "vaddr=%p page_size=%#lx mapping=[%p, %p)", | |
8 | + "vaddr=%p page_size=%#zx mapping=[%p, %p)", | |
9 | region->info.iova.iov_base, iov_end(®ion->info.iova), | |
10 | region->info.vaddr, region->info.page_size, | |
11 | region->info.mapping.iov_base, iov_end(®ion->info.mapping)); | |
12 | @@ -300,8 +300,8 @@ MOCK_DEFINE(dma_controller_add_region)(d | |
13 | ||
14 | assert(dma != NULL); | |
15 | ||
16 | - snprintf(rstr, sizeof(rstr), "[%p, %p) fd=%d offset=%#lx prot=%#x", | |
17 | - dma_addr, (char *)dma_addr + size, fd, offset, prot); | |
18 | + snprintf(rstr, sizeof(rstr), "[%p, %p) fd=%d offset=%#"PRIx64" prot=%#x", | |
19 | + dma_addr, (char *)dma_addr + size, fd, (int64_t)offset, prot); | |
20 | ||
21 | if (size > dma->max_size) { | |
22 | vfu_log(dma->vfu_ctx, LOG_ERR, "DMA region size %zu > max %zu", | |
23 | @@ -317,7 +317,7 @@ MOCK_DEFINE(dma_controller_add_region)(d | |
24 | region->info.iova.iov_len == size) { | |
25 | if (offset != region->offset) { | |
26 | vfu_log(dma->vfu_ctx, LOG_ERR, "bad offset for new DMA region " | |
27 | - "%s; existing=%#lx", rstr, region->offset); | |
28 | + "%s; existing=%#"PRIx64, rstr, (int64_t)(region->offset)); | |
29 | return ERROR_INT(EINVAL); | |
30 | } | |
31 | if (!fds_are_same_file(region->fd, fd)) { | |
32 | @@ -573,7 +573,7 @@ dma_controller_dirty_page_get(dma_contro | |
33 | } | |
34 | ||
35 | if (pgsize != dma->dirty_pgsize) { | |
36 | - vfu_log(dma->vfu_ctx, LOG_ERR, "bad page size %ld", pgsize); | |
37 | + vfu_log(dma->vfu_ctx, LOG_ERR, "bad page size %zu", pgsize); | |
38 | return ERROR_INT(EINVAL); | |
39 | } | |
40 | ||
41 | @@ -588,7 +588,7 @@ dma_controller_dirty_page_get(dma_contro | |
42 | * receive. | |
43 | */ | |
44 | if (size != (size_t)bitmap_size) { | |
45 | - vfu_log(dma->vfu_ctx, LOG_ERR, "bad bitmap size %ld != %ld", size, | |
46 | + vfu_log(dma->vfu_ctx, LOG_ERR, "bad bitmap size %zu != %zd", size, | |
47 | bitmap_size); | |
48 | return ERROR_INT(EINVAL); | |
49 | } | |
50 | --- qemu-7.2.10/subprojects/libvfio-user/lib/libvfio-user.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
51 | +++ qemu-7.2.10/subprojects/libvfio-user/lib/libvfio-user.c 2024-04-03 22:01:11.697844648 +0200 | |
52 | @@ -183,16 +183,16 @@ debug_region_access(vfu_ctx_t *vfu_ctx, | |
53 | case 2: val = *((uint16_t *)buf); break; | |
54 | case 1: val = *((uint8_t *)buf); break; | |
55 | default: | |
56 | - vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: %s %zu bytes at %#lx", | |
57 | + vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: %s %zu bytes at %#"PRIx64, | |
58 | region, verb, count, offset); | |
59 | return; | |
60 | } | |
61 | ||
62 | if (is_write) { | |
63 | - vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: wrote %#zx to (%#lx:%zu)", | |
64 | + vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: wrote %#"PRIx64" to (%#"PRIx64":%zu)", | |
65 | region, val, offset, count); | |
66 | } else { | |
67 | - vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: read %#zx from (%#lx:%zu)", | |
68 | + vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: read %#"PRIx64" from (%#"PRIx64":%zu)", | |
69 | region, val, offset, count); | |
70 | } | |
71 | } | |
72 | @@ -235,7 +235,7 @@ region_access(vfu_ctx_t *vfu_ctx, size_t | |
73 | ||
74 | out: | |
75 | if (ret != (ssize_t)count) { | |
76 | - vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: %s (%#lx:%zu) failed: %m", | |
77 | + vfu_log(vfu_ctx, LOG_DEBUG, "region%zu: %s (%#"PRIx64":%zu) failed: %m", | |
78 | region, verb, offset, count); | |
79 | } else { | |
80 | debug_region_access(vfu_ctx, region, buf, count, offset, is_write); | |
81 | @@ -266,7 +266,7 @@ is_valid_region_access(vfu_ctx_t *vfu_ct | |
82 | ||
83 | if (cmd == VFIO_USER_REGION_WRITE && size - sizeof(*ra) != ra->count) { | |
84 | vfu_log(vfu_ctx, LOG_ERR, "region write count too small: " | |
85 | - "expected %lu, got %u", size - sizeof(*ra), ra->count); | |
86 | + "expected %zu, got %u", size - sizeof(*ra), ra->count); | |
87 | return false; | |
88 | } | |
89 | ||
90 | @@ -278,7 +278,7 @@ is_valid_region_access(vfu_ctx_t *vfu_ct | |
91 | } | |
92 | ||
93 | if (satadd_u64(ra->offset, ra->count) > vfu_ctx->reg_info[index].size) { | |
94 | - vfu_log(vfu_ctx, LOG_ERR, "out of bounds region access %#lx-%#lx " | |
95 | +        vfu_log(vfu_ctx, LOG_ERR, "out of bounds region access %#"PRIx64"-%#"PRIx64" " | |
96 | "(size %u)", ra->offset, ra->offset + ra->count, | |
97 | vfu_ctx->reg_info[index].size); | |
98 | ||
99 | @@ -337,7 +337,7 @@ handle_region_access(vfu_ctx_t *vfu_ctx, | |
100 | ||
101 | ret = region_access(vfu_ctx, in_ra->region, buf, in_ra->count, | |
102 | in_ra->offset, msg->hdr.cmd == VFIO_USER_REGION_WRITE); | |
103 | - if (ret != in_ra->count) { | |
104 | + if ((unsigned long)ret != (unsigned long)(in_ra->count)) { | |
105 | /* FIXME we should return whatever has been accessed, not an error */ | |
106 | if (ret >= 0) { | |
107 | ret = ERROR_INT(EINVAL); | |
108 | @@ -671,7 +671,7 @@ handle_dma_map(vfu_ctx_t *vfu_ctx, vfu_m | |
109 | return ERROR_INT(EINVAL); | |
110 | } | |
111 | ||
112 | - snprintf(rstr, sizeof(rstr), "[%#lx, %#lx) offset=%#lx flags=%#x", | |
113 | + snprintf(rstr, sizeof(rstr), "[%#"PRIx64", %#"PRIx64") offset=%#"PRIx64" flags=%#x", | |
114 | dma_map->addr, dma_map->addr + dma_map->size, dma_map->offset, | |
115 | dma_map->flags); | |
116 | ||
117 | @@ -700,7 +700,7 @@ handle_dma_map(vfu_ctx_t *vfu_ctx, vfu_m | |
118 | } | |
119 | } | |
120 | ||
121 | - ret = dma_controller_add_region(vfu_ctx->dma, (void *)dma_map->addr, | |
122 | + ret = dma_controller_add_region(vfu_ctx->dma, (void *)(uintptr_t)dma_map->addr, | |
123 | dma_map->size, fd, dma_map->offset, | |
124 | prot); | |
125 | if (ret < 0) { | |
126 | @@ -747,7 +747,7 @@ is_valid_unmap(vfu_ctx_t *vfu_ctx, vfu_m | |
127 | ||
128 | case VFIO_DMA_UNMAP_FLAG_ALL: | |
129 | if (dma_unmap->addr || dma_unmap->size) { | |
130 | - vfu_log(vfu_ctx, LOG_ERR, "bad addr=%#lx or size=%#lx, expected " | |
131 | + vfu_log(vfu_ctx, LOG_ERR, "bad addr=%#"PRIx64" or size=%#"PRIx64", expected " | |
132 | "both to be zero", dma_unmap->addr, dma_unmap->size); | |
133 | errno = EINVAL; | |
134 | return false; | |
135 | @@ -791,7 +791,7 @@ handle_dma_unmap(vfu_ctx_t *vfu_ctx, vfu | |
136 | return -1; | |
137 | } | |
138 | ||
139 | - snprintf(rstr, sizeof(rstr), "[%#lx, %#lx) flags=%#x", | |
140 | + snprintf(rstr, sizeof(rstr), "[%#"PRIx64", %#"PRIx64") flags=%#x", | |
141 | dma_unmap->addr, dma_unmap->addr + dma_unmap->size, dma_unmap->flags); | |
142 | ||
143 | vfu_log(vfu_ctx, LOG_DEBUG, "removing DMA region %s", rstr); | |
144 | @@ -817,7 +817,7 @@ handle_dma_unmap(vfu_ctx_t *vfu_ctx, vfu | |
145 | if (dma_unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { | |
146 | memcpy(msg->out.iov.iov_base + sizeof(*dma_unmap), dma_unmap->bitmap, sizeof(*dma_unmap->bitmap)); | |
147 | ret = dma_controller_dirty_page_get(vfu_ctx->dma, | |
148 | - (vfu_dma_addr_t)dma_unmap->addr, | |
149 | + (vfu_dma_addr_t)(uintptr_t)dma_unmap->addr, | |
150 | dma_unmap->size, | |
151 | dma_unmap->bitmap->pgsize, | |
152 | dma_unmap->bitmap->size, | |
153 | @@ -829,7 +829,7 @@ handle_dma_unmap(vfu_ctx_t *vfu_ctx, vfu | |
154 | } | |
155 | ||
156 | ret = dma_controller_remove_region(vfu_ctx->dma, | |
157 | - (void *)dma_unmap->addr, | |
158 | + (void *)(uintptr_t)dma_unmap->addr, | |
159 | dma_unmap->size, | |
160 | vfu_ctx->dma_unregister, | |
161 | vfu_ctx); | |
162 | @@ -924,7 +924,7 @@ handle_dirty_pages_get(vfu_ctx_t *vfu_ct | |
163 | range_out = msg->out.iov.iov_base + sizeof(*dirty_pages_out); | |
164 | memcpy(range_out, range_in, sizeof(*range_out)); | |
165 | ret = dma_controller_dirty_page_get(vfu_ctx->dma, | |
166 | - (vfu_dma_addr_t)range_in->iova, | |
167 | + (vfu_dma_addr_t)(uintptr_t)range_in->iova, | |
168 | range_in->size, | |
169 | range_in->bitmap.pgsize, | |
170 | range_in->bitmap.size, bitmap_out); | |
171 | @@ -939,7 +939,7 @@ handle_dirty_pages_get(vfu_ctx_t *vfu_ct | |
172 | } | |
173 | } else { | |
174 | vfu_log(vfu_ctx, LOG_ERR, | |
175 | - "dirty pages: get [%#lx, %#lx): buffer too small (%u < %lu)", | |
176 | + "dirty pages: get [%#"PRIx64", %#"PRIx64"): buffer too small (%u < %zu)", | |
177 | range_in->iova, range_in->iova + range_in->size, | |
178 | dirty_pages_in->argsz, argsz); | |
179 | } | |
180 | @@ -2124,7 +2124,7 @@ vfu_dma_transfer(vfu_ctx_t *vfu_ctx, enu | |
181 | while (remaining > 0) { | |
182 | int ret; | |
183 | ||
184 | - dma_req->addr = (uint64_t)sg->dma_addr + count; | |
185 | + dma_req->addr = (uint64_t)(uintptr_t)sg->dma_addr + count; | |
186 | dma_req->count = MIN(remaining, vfu_ctx->client_max_data_xfer_size); | |
187 | ||
188 | if (cmd == VFIO_USER_DMA_WRITE) { | |
189 | @@ -2154,7 +2154,7 @@ vfu_dma_transfer(vfu_ctx_t *vfu_ctx, enu | |
190 | if (dma_reply->addr != dma_req->addr || | |
191 | dma_reply->count != dma_req->count) { | |
192 | vfu_log(vfu_ctx, LOG_ERR, "bad reply to DMA transfer: " | |
193 | - "request:%#lx,%lu reply:%#lx,%lu", | |
194 | + "request:%#"PRIx64",%"PRIu64" reply:%#"PRIx64",%"PRIu64, | |
195 | dma_req->addr, dma_req->count, | |
196 | dma_reply->addr, dma_reply->count); | |
197 | free(rbuf); | |
198 | --- qemu-7.2.10/subprojects/libvfio-user/lib/migration.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
199 | +++ qemu-7.2.10/subprojects/libvfio-user/lib/migration.c 2024-04-03 22:17:23.329247535 +0200 | |
200 | @@ -413,7 +413,7 @@ MOCK_DEFINE(migration_region_access_regi | |
201 | case offsetof(struct vfio_user_migration_info, device_state): | |
202 | if (count != sizeof(migr->info.device_state)) { | |
203 | vfu_log(vfu_ctx, LOG_ERR, | |
204 | - "bad device_state access size %ld", count); | |
205 | + "bad device_state access size %zd", count); | |
206 | return ERROR_INT(EINVAL); | |
207 | } | |
208 | device_state = (uint32_t *)buf; | |
209 | @@ -443,7 +443,7 @@ MOCK_DEFINE(migration_region_access_regi | |
210 | case offsetof(struct vfio_user_migration_info, pending_bytes): | |
211 | if (count != sizeof(migr->info.pending_bytes)) { | |
212 | vfu_log(vfu_ctx, LOG_ERR, | |
213 | - "bad pending_bytes access size %ld", count); | |
214 | + "bad pending_bytes access size %zd", count); | |
215 | return ERROR_INT(EINVAL); | |
216 | } | |
217 | ret = handle_pending_bytes(vfu_ctx, migr, (uint64_t *)buf, is_write); | |
218 | @@ -451,7 +451,7 @@ MOCK_DEFINE(migration_region_access_regi | |
219 | case offsetof(struct vfio_user_migration_info, data_offset): | |
220 | if (count != sizeof(migr->info.data_offset)) { | |
221 | vfu_log(vfu_ctx, LOG_ERR, | |
222 | - "bad data_offset access size %ld", count); | |
223 | + "bad data_offset access size %zd", count); | |
224 | return ERROR_INT(EINVAL); | |
225 | } | |
226 | ret = handle_data_offset(vfu_ctx, migr, (uint64_t *)buf, is_write); | |
227 | @@ -459,14 +459,14 @@ MOCK_DEFINE(migration_region_access_regi | |
228 | case offsetof(struct vfio_user_migration_info, data_size): | |
229 | if (count != sizeof(migr->info.data_size)) { | |
230 | vfu_log(vfu_ctx, LOG_ERR, | |
231 | - "bad data_size access size %ld", count); | |
232 | + "bad data_size access size %zd", count); | |
233 | return ERROR_INT(EINVAL); | |
234 | } | |
235 | ret = handle_data_size(vfu_ctx, migr, (uint64_t *)buf, is_write); | |
236 | break; | |
237 | default: | |
238 | - vfu_log(vfu_ctx, LOG_ERR, "bad migration region register offset %#lx", | |
239 | - pos); | |
240 | + vfu_log(vfu_ctx, LOG_ERR, "bad migration region register offset %#"PRIx64, | |
241 | + (int64_t)pos); | |
242 | return ERROR_INT(EINVAL); | |
243 | } | |
244 | return ret; | |
245 | @@ -502,8 +502,8 @@ migration_region_access(vfu_ctx_t *vfu_c | |
246 | * any access to the data region properly. | |
247 | */ | |
248 | vfu_log(vfu_ctx, LOG_WARNING, | |
249 | - "bad access to dead space %#lx-%#lx in migration region", | |
250 | - pos, pos + count - 1); | |
251 | + "bad access to dead space %#"PRIx64"-%#"PRIx64" in migration region", | |
252 | + (int64_t)pos, (int64_t)(pos + count - 1)); | |
253 | return ERROR_INT(EINVAL); | |
254 | } | |
255 | ||
256 | --- qemu-7.2.10/subprojects/libvfio-user/lib/pci.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
257 | +++ qemu-7.2.10/subprojects/libvfio-user/lib/pci.c 2024-04-03 22:39:07.265516839 +0200 | |
258 | @@ -264,8 +264,8 @@ pci_hdr_write(vfu_ctx_t *vfu_ctx, const | |
259 | ret = handle_erom_write(vfu_ctx, cfg_space, buf); | |
260 | break; | |
261 | default: | |
262 | - vfu_log(vfu_ctx, LOG_ERR, "PCI config write %#lx not handled", | |
263 | - offset); | |
264 | + vfu_log(vfu_ctx, LOG_ERR, "PCI config write %#"PRIx64" not handled", | |
265 | + (int64_t)offset); | |
266 | ret = ERROR_INT(EINVAL); | |
267 | } | |
268 | ||
269 | @@ -315,7 +315,7 @@ pci_nonstd_access(vfu_ctx_t *vfu_ctx, ch | |
270 | ||
271 | if (is_write) { | |
272 | vfu_log(vfu_ctx, LOG_ERR, "no callback for write to config space " | |
273 | - "offset %lu size %zu", offset, count); | |
274 | + "offset %"PRId64" size %zu", (int64_t)offset, count); | |
275 | return ERROR_INT(EINVAL); | |
276 | } | |
277 | ||
278 | @@ -429,8 +429,8 @@ pci_config_space_access(vfu_ctx_t *vfu_c | |
279 | size = pci_config_space_next_segment(vfu_ctx, count, offset, is_write, | |
280 | &cb); | |
281 | if (cb == NULL) { | |
282 | - vfu_log(vfu_ctx, LOG_ERR, "bad write to PCI config space %#lx-%#lx", | |
283 | - offset, offset + count - 1); | |
284 | + vfu_log(vfu_ctx, LOG_ERR, "bad write to PCI config space %#"PRIx64"-%#"PRIx64, | |
285 | + (int64_t)offset, (int64_t)(offset + count - 1)); | |
286 | return size; | |
287 | } | |
288 | ||
289 | --- qemu-7.2.10/subprojects/libvfio-user/lib/pci_caps.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
290 | +++ qemu-7.2.10/subprojects/libvfio-user/lib/pci_caps.c 2024-04-03 22:41:43.031339650 +0200 | |
291 | @@ -483,7 +483,7 @@ cap_place(vfu_ctx_t *vfu_ctx, struct pci | |
292 | ||
293 | if (cap->off != 0) { | |
294 | if (cap->off < PCI_STD_HEADER_SIZEOF) { | |
295 | - vfu_log(vfu_ctx, LOG_ERR, "invalid offset %#lx for capability " | |
296 | + vfu_log(vfu_ctx, LOG_ERR, "invalid offset %#zx for capability " | |
297 | "%u (%s)", cap->off, cap->id, cap->name); | |
298 | return ERROR_INT(EINVAL); | |
299 | } | |
300 | @@ -516,7 +516,7 @@ cap_place(vfu_ctx_t *vfu_ctx, struct pci | |
301 | ||
302 | if (cap->off + cap->size > pci_config_space_size(vfu_ctx)) { | |
303 | vfu_log(vfu_ctx, LOG_ERR, "no config space left for capability " | |
304 | - "%u (%s) of size %zu bytes at offset %#lx", cap->id, | |
305 | + "%u (%s) of size %zu bytes at offset %#zx", cap->id, | |
306 | cap->name, cap->size, cap->off); | |
307 | return ERROR_INT(ENOSPC); | |
308 | } | |
309 | @@ -547,7 +547,7 @@ ext_cap_place(vfu_ctx_t *vfu_ctx, struct | |
310 | ||
311 | if (cap->off != 0) { | |
312 | if (cap->off < PCI_CFG_SPACE_SIZE) { | |
313 | - vfu_log(vfu_ctx, LOG_ERR, "invalid offset %#lx for capability " | |
314 | + vfu_log(vfu_ctx, LOG_ERR, "invalid offset %#zx for capability " | |
315 | "%u (%s)", cap->off, cap->id, cap->name); | |
316 | return ERROR_INT(EINVAL); | |
317 | } | |
318 | @@ -581,7 +581,7 @@ ext_cap_place(vfu_ctx_t *vfu_ctx, struct | |
319 | ||
320 | if (cap->off + cap->size > pci_config_space_size(vfu_ctx)) { | |
321 | vfu_log(vfu_ctx, LOG_ERR, "no config space left for capability " | |
322 | - "%u (%s) of size %zu bytes at offset %#lx", cap->id, | |
323 | + "%u (%s) of size %zu bytes at offset %#zx", cap->id, | |
324 | cap->name, cap->size, cap->off); | |
325 | return ERROR_INT(ENOSPC); | |
326 | } | |
327 | @@ -700,7 +700,7 @@ vfu_pci_add_capability(vfu_ctx_t *vfu_ct | |
328 | ||
329 | if (cap.off + cap.size >= pci_config_space_size(vfu_ctx)) { | |
330 | vfu_log(vfu_ctx, LOG_DEBUG, | |
331 | - "PCI capability past end of config space, %#lx >= %#lx", | |
332 | + "PCI capability past end of config space, %#zx >= %#zx", | |
333 | cap.off + cap.size, pci_config_space_size(vfu_ctx)); | |
334 | return ERROR_INT(EINVAL); | |
335 | } | |
336 | --- qemu-7.2.10/subprojects/libvfio-user/lib/tran.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
337 | +++ qemu-7.2.10/subprojects/libvfio-user/lib/tran.c 2024-04-03 22:42:30.844413958 +0200 | |
338 | @@ -176,7 +176,7 @@ recv_version(vfu_ctx_t *vfu_ctx, uint16_ | |
339 | ||
340 | if (msg.in.iov.iov_len < sizeof(*cversion)) { | |
341 | vfu_log(vfu_ctx, LOG_ERR, | |
342 | - "msg%#hx: VFIO_USER_VERSION: invalid size %lu", | |
343 | + "msg%#hx: VFIO_USER_VERSION: invalid size %zu", | |
344 | *msg_idp, msg.in.iov.iov_len); | |
345 | ret = EINVAL; | |
346 | goto out; | |
347 | --- qemu-7.2.10/subprojects/libvfio-user/samples/client.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
348 | +++ qemu-7.2.10/subprojects/libvfio-user/samples/client.c 2024-04-04 06:27:19.254657097 +0200 | |
349 | @@ -110,7 +110,7 @@ send_version(int sock) | |
350 | "\"max_msg_fds\":%u," | |
351 | "\"max_data_xfer_size\":%u," | |
352 | "\"migration\":{" | |
353 | - "\"pgsize\":%zu" | |
354 | + "\"pgsize\":%lu" | |
355 | "}" | |
356 | "}" | |
357 | "}", CLIENT_MAX_FDS, CLIENT_MAX_DATA_XFER_SIZE, sysconf(_SC_PAGESIZE)); | |
358 | @@ -155,7 +155,7 @@ recv_version(int sock, int *server_max_f | |
359 | } | |
360 | ||
361 | if (vlen < sizeof(*sversion)) { | |
362 | - errx(EXIT_FAILURE, "VFIO_USER_VERSION: invalid size %lu", vlen); | |
363 | + errx(EXIT_FAILURE, "VFIO_USER_VERSION: invalid size %zu", vlen); | |
364 | } | |
365 | ||
366 | if (sversion->major != LIB_VFIO_USER_MAJOR) { | |
367 | @@ -290,7 +290,7 @@ mmap_sparse_areas(int *fds, struct vfio_ | |
368 | sparse->areas[i].offset); | |
369 | if (addr == MAP_FAILED) { | |
370 | err(EXIT_FAILURE, | |
371 | - "failed to mmap sparse region #%lu in %s (%#llx-%#llx)", | |
372 | + "failed to mmap sparse region #%zu in %s (%#llx-%#llx)", | |
373 | i, buf, sparse->areas[i].offset, | |
374 | sparse->areas[i].offset + sparse->areas[i].size - 1); | |
375 | } | |
376 | @@ -330,7 +330,7 @@ get_device_region_info(int sock, uint32_ | |
377 | ||
378 | cap_sz = region_info->argsz - sizeof(struct vfio_region_info); | |
379 | printf("client: %s: region_info[%d] offset %#llx flags %#x size %llu " | |
380 | - "cap_sz %lu #FDs %lu\n", __func__, index, region_info->offset, | |
381 | + "cap_sz %zu #FDs %zu\n", __func__, index, region_info->offset, | |
382 | region_info->flags, region_info->size, cap_sz, nr_fds); | |
383 | if (cap_sz) { | |
384 | struct vfio_region_info_cap_sparse_mmap *sparse = NULL; | |
385 | @@ -487,14 +487,14 @@ access_region(int sock, int region, bool | |
386 | recv_data, recv_data_len, NULL, 0); | |
387 | pthread_mutex_unlock(&mutex); | |
388 | if (ret != 0) { | |
389 | - warn("failed to %s region %d %#lx-%#lx", | |
390 | + warn("failed to %s region %d %#"PRIx64"-%#"PRIx64, | |
391 | is_write ? "write to" : "read from", region, offset, | |
392 | offset + data_len - 1); | |
393 | free(recv_data); | |
394 | return ret; | |
395 | } | |
396 | if (recv_data->count != data_len) { | |
397 | - warnx("bad %s data count, expected=%lu, actual=%d", | |
398 | + warnx("bad %s data count, expected=%zu, actual=%d", | |
399 | is_write ? "write" : "read", data_len, | |
400 | recv_data->count); | |
401 | free(recv_data); | |
402 | @@ -585,8 +585,8 @@ handle_dma_write(int sock, struct vfio_u | |
403 | c = pwrite(dma_region_fds[i], data, dma_access.count, offset); | |
404 | ||
405 | if (c != (ssize_t)dma_access.count) { | |
406 | - err(EXIT_FAILURE, "failed to write to fd=%d at [%#lx-%#lx)", | |
407 | - dma_region_fds[i], offset, offset + dma_access.count); | |
408 | + err(EXIT_FAILURE, "failed to write to fd=%d at [%#"PRIx64"-%#"PRIx64")", | |
409 | + dma_region_fds[i], (int64_t)offset, (int64_t)(offset + dma_access.count)); | |
410 | } | |
411 | break; | |
412 | } | |
413 | @@ -640,8 +640,8 @@ handle_dma_read(int sock, struct vfio_us | |
414 | c = pread(dma_region_fds[i], data, dma_access.count, offset); | |
415 | ||
416 | if (c != (ssize_t)dma_access.count) { | |
417 | - err(EXIT_FAILURE, "failed to read from fd=%d at [%#lx-%#lx)", | |
418 | - dma_region_fds[i], offset, offset + dma_access.count); | |
419 | + err(EXIT_FAILURE, "failed to read from fd=%d at [%#"PRIx64"-%#"PRIx64")", | |
420 | + dma_region_fds[i], (int64_t)offset, (int64_t)(offset + dma_access.count)); | |
421 | } | |
422 | break; | |
423 | } | |
424 | @@ -706,7 +706,7 @@ get_dirty_bitmap(int sock, struct vfio_u | |
425 | err(EXIT_FAILURE, "failed to get dirty page bitmap"); | |
426 | } | |
427 | ||
428 | - printf("client: %s: %#lx-%#lx\t%#x\n", __func__, range->iova, | |
429 | + printf("client: %s: %#"PRIx64"-%#"PRIx64"\t%#x\n", __func__, range->iova, | |
430 | range->iova + range->size - 1, bitmap[0]); | |
431 | ||
432 | free(data); | |
433 | @@ -900,7 +900,7 @@ migrate_from(int sock, size_t *nr_iters, | |
434 | _nr_iters += do_migrate(sock, 1, (*migr_iters) + _nr_iters); | |
435 | if (_nr_iters != 2) { | |
436 | errx(EXIT_FAILURE, | |
437 | - "expected 2 iterations instead of %ld while in stop-and-copy state", | |
438 | + "expected 2 iterations instead of %zd while in stop-and-copy state", | |
439 | _nr_iters); | |
440 | } | |
441 | ||
442 | @@ -1000,7 +1000,7 @@ migrate_to(char *old_sock_path, int *ser | |
443 | * TODO write half of migration data via regular write and other half via | |
444 | * memopy map. | |
445 | */ | |
446 | - printf("client: writing migration device data %#lx-%#lx\n", | |
447 | + printf("client: writing migration device data %#"PRIx64"-%#"PRIx64"\n", | |
448 | data_offset, data_offset + migr_iters[i].iov_len - 1); | |
449 | ret = access_region(sock, VFU_PCI_DEV_MIGR_REGION_IDX, true, | |
450 | data_offset, migr_iters[i].iov_base, | |
451 | --- qemu-7.2.10/subprojects/libvfio-user/samples/server.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
452 | +++ qemu-7.2.10/subprojects/libvfio-user/samples/server.c 2024-04-04 16:58:57.346435682 +0200 | |
453 | @@ -93,8 +93,8 @@ bar0_access(vfu_ctx_t *vfu_ctx, char * c | |
454 | struct server_data *server_data = vfu_get_private(vfu_ctx); | |
455 | ||
456 | if (count != sizeof(time_t) || offset != 0) { | |
457 | - vfu_log(vfu_ctx, LOG_ERR, "bad BAR0 access %#lx-%#lx", | |
458 | - offset, offset + count - 1); | |
459 | + vfu_log(vfu_ctx, LOG_ERR, "bad BAR0 access %#"PRIx64"-%#"PRIx64, | |
460 | + (int64_t)offset, (int64_t)(offset + count - 1)); | |
461 | errno = EINVAL; | |
462 | return -1; | |
463 | } | |
464 | @@ -123,8 +123,8 @@ bar1_access(vfu_ctx_t *vfu_ctx, char * c | |
465 | struct server_data *server_data = vfu_get_private(vfu_ctx); | |
466 | ||
467 | if (offset + count > server_data->bar1_size) { | |
468 | - vfu_log(vfu_ctx, LOG_ERR, "bad BAR1 access %#lx-%#lx", | |
469 | - offset, offset + count - 1); | |
470 | + vfu_log(vfu_ctx, LOG_ERR, "bad BAR1 access %#"PRIx64"-%#"PRIx64, | |
471 | + (int64_t)offset, (int64_t)(offset + count - 1)); | |
472 | errno = EINVAL; | |
473 | return -1; | |
474 | } | |
475 | @@ -353,7 +353,7 @@ migration_write_data(vfu_ctx_t *vfu_ctx, | |
476 | assert(data != NULL); | |
477 | ||
478 | if (offset != 0 || size < server_data->bar1_size) { | |
479 | - vfu_log(vfu_ctx, LOG_DEBUG, "XXX bad migration data write %#lx-%#lx", | |
480 | + vfu_log(vfu_ctx, LOG_DEBUG, "XXX bad migration data write %#"PRIx64"-%#"PRIx64, | |
481 | offset, offset + size - 1); | |
482 | errno = EINVAL; | |
483 | return -1; | |
484 | --- qemu-7.2.10/subprojects/libvfio-user/test/unit-tests.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
485 | +++ qemu-7.2.10/subprojects/libvfio-user/test/unit-tests.c 2024-04-04 16:59:18.846711984 +0200 | |
486 | @@ -161,8 +161,8 @@ static int | |
487 | check_dma_info(const LargestIntegralType value, | |
488 | const LargestIntegralType cvalue) | |
489 | { | |
490 | - vfu_dma_info_t *info = (vfu_dma_info_t *)value; | |
491 | - vfu_dma_info_t *cinfo = (vfu_dma_info_t *)cvalue; | |
492 | + vfu_dma_info_t *info = (vfu_dma_info_t *)(uintptr_t)value; | |
493 | + vfu_dma_info_t *cinfo = (vfu_dma_info_t *)(uintptr_t)cvalue; | |
494 | ||
495 | return info->iova.iov_base == cinfo->iova.iov_base && | |
496 | info->iova.iov_len == cinfo->iova.iov_len && | |
497 | @@ -330,7 +330,7 @@ test_dma_addr_to_sgl(void **state UNUSED | |
498 | assert_int_equal(1, ret); | |
499 | assert_int_equal(r->info.iova.iov_base, sg[0].dma_addr); | |
500 | assert_int_equal(0, sg[0].region); | |
501 | - assert_int_equal(0x2000 - (unsigned long long)r->info.iova.iov_base, | |
502 | + assert_int_equal(0x2000 - (unsigned long long)(uintptr_t)r->info.iova.iov_base, | |
503 | sg[0].offset); | |
504 | assert_int_equal(0x400, sg[0].length); | |
505 | assert_true(vfu_sg_is_mappable(&vfu_ctx, &sg[0])); | |
506 | --- qemu-7.2.10/subprojects/libvfio-user/lib/tran_pipe.c.orig 2024-03-04 17:26:53.000000000 +0100 | |
507 | +++ qemu-7.2.10/subprojects/libvfio-user/lib/tran_pipe.c 2024-04-04 17:01:32.872219201 +0200 | |
508 | @@ -83,7 +83,7 @@ tran_pipe_send_iovec(int fd, uint16_t ms | |
509 | return ERROR_INT(ECONNRESET); | |
510 | } | |
511 | return -1; | |
512 | - } else if (ret < hdr.msg_size) { | |
513 | + } else if ((uint32_t)ret < hdr.msg_size) { | |
514 | return ERROR_INT(ECONNRESET); | |
515 | } | |
516 |