/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

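/*
 * VIRGL() dispatches a call to the virgl (3D) implementation when the
 * virgl renderer is in use and to the simple 2D implementation
 * otherwise; without CONFIG_VIRGL it always expands to the simple path.
 */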
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

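/*
 * A MOVE command only repositions the pointer; an UPDATE command also
 * refreshes the cursor image from the guest resource (through the
 * virgl or the simple path) and redefines it on the console.
 */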
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
        s->cursor = *cursor;
    }

    s->cursor.pos.x = cursor->pos.x;
    s->cursor.pos.y = cursor->pos.y;
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

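/*
 * virtio-gpu formats are defined in terms of byte order, while pixman
 * formats follow host endianness, so the mapping differs between
 * big-endian and little-endian hosts.
 */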
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

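/*
 * Create a host-side 2D resource: validate the id and format, then
 * back the resource with a pixman image of the requested size.
 */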
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

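/*
 * Bind a resource to a scanout.  resource_id 0 disables the scanout
 * (not allowed for scanout 0); otherwise the display surface is
 * recreated whenever the backing pointer, width or height changed.
 */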
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

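/*
 * Map the guest memory entries that back a resource into an iovec,
 * one cpu_physical_memory_map() call per entry; fails when an entry
 * cannot be mapped in full.
 */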
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

)
858 VirtIOGPU
*g
= opaque
;
859 virtio_gpu_handle_ctrl(&g
->parent_obj
, g
->ctrl_vq
);
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

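/*
 * Save: each resource is written as header, backing-store entries and
 * raw pixel data, terminated by a zero resource id; the scanout state
 * follows as a vmstate section.
 */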
static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        update_cursor(g, &scanout->cursor);
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker);
    }
}

*qdev
, Error
**errp
)
1163 VirtIOGPU
*g
= VIRTIO_GPU(qdev
);
1164 if (g
->migration_blocker
) {
1165 migrate_del_blocker(g
->migration_blocker
);
1166 error_free(g
->migration_blocker
);
1170 static void virtio_gpu_instance_init(Object
*obj
)
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.  Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);