virgl: count the calls to gl_block
hw/display/virtio-gpu.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

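/*
 * VIRGL() dispatches a call to either the virgl (3D) implementation or
 * the simple 2D implementation, depending on whether the guest
 * negotiated the virgl renderer.  Without CONFIG_VIRGL only the simple
 * variant exists.
 */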
#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

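/*
 * Virgl variant: the cursor pixels live on the host renderer side, so
 * they are fetched from virglrenderer (which returns a malloc'd copy)
 * rather than from a guest-backed pixman image.
 */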
#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

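/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR:
 * a move only repositions the pointer, an update additionally
 * refreshes the 64x64 cursor image from the referenced resource.
 */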
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

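/*
 * Send a response for @cmd back to the guest.  If the request carried
 * the FENCE flag, the fence and context ids are propagated into the
 * response header so the guest can match the completion.
 */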
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

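/*
 * Map a virtio-gpu format code to the matching pixman format.  The
 * virtio formats are defined in terms of byte order while pixman packs
 * pixels in host endianness, hence the two mapping tables.  Returns 0
 * for unknown formats.
 */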
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

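/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman image
 * for the resource.  Guest backing pages are attached separately via
 * RESOURCE_ATTACH_BACKING.
 */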
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

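/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest
 * backing store into the host pixman image.  A transfer of the full
 * image width starting at offset 0 can be done as one copy; otherwise
 * the rectangle is copied line by line.
 */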
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

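/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a rectangle of a resource to a
 * display, or disable the scanout when resource_id is 0.  The display
 * surface aliases the resource's pixman data, so a new surface is only
 * created when the backing pointer or geometry actually changed.
 */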
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

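/*
 * Build an iovec covering the guest memory that backs a resource.  The
 * entry table is read from the command payload, every entry is mapped
 * with cpu_physical_memory_map(), and the guest physical addresses are
 * optionally returned via @addr (needed for migration).  The entry
 * count is capped at 16384 to bound the allocation a guest can
 * trigger.
 */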
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

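/*
 * Drain the queued control commands in order.  A command that cannot
 * complete yet (cmd->waiting, e.g. the renderer is blocked) stops
 * processing; fenced commands that haven't finished move to the fence
 * queue and are accounted as inflight.
 */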
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

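/*
 * Block/unblock calls may nest, so renderer_blocked counts the calls
 * to gl_block instead of storing a bool: each block increments it,
 * each unblock decrements it, and the command queue is only processed
 * again once the counter drops back to zero.
 */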
static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
    .name = "virtio-gpu-with-virgl",
    .unmigratable = 1,
};

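/*
 * Migration (2D mode only; virgl mode is unmigratable, see above): the
 * resource list is streamed as (id, geometry, format, backing entries,
 * pixel data) records terminated by a zero resource id, followed by
 * the scanout state.
 */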
static void virtio_gpu_save(QEMUFile *f, void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(g);
    struct virtio_gpu_simple_resource *res;
    int i;

    virtio_save(vdev, f);

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(g);
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i, ret;

    if (version_id != VIRTIO_GPU_VM_VERSION) {
        return -EINVAL;
    }

    ret = virtio_load(vdev, f, version_id);
    if (ret) {
        return ret;
    }

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        update_cursor(g, &scanout->cursor);
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

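/*
 * Realize: validate max_outputs, size the virtqueues (larger control
 * queue in 3D mode), create one QEMU console per scanout and register
 * the migration handler matching the chosen mode.
 */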
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
    } else {
        register_savevm(qdev, "virtio-gpu", -1, VIRTIO_GPU_VM_VERSION,
                        virtio_gpu_save, virtio_gpu_load, g);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

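/*
 * Compile-time checks that the wire structs shared with the guest have
 * the sizes the virtio-gpu protocol headers define.
 */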
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);