/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif
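
/*
 * VIRGL() is the single dispatch point between the two render paths:
 * with CONFIG_VIRGL and a guest that negotiated VIRTIO_GPU_F_VIRGL
 * (g->use_virgl_renderer), the _virgl variant runs; otherwise the
 * 2D-only _simple variant does.  Without CONFIG_VIRGL the choice is
 * made at compile time and the 3D helpers are never referenced.
 */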
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}
#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }

    s->cursor.pos.x = cursor->pos.x;
    s->cursor.pos.y = cursor->pos.y;

    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
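
/*
 * Guest cursor protocol, as handled above: UPDATE_CURSOR (re)loads the
 * 64x64 cursor image from the referenced resource and redefines it on
 * the host console; MOVE_CURSOR only repositions it.  A resource_id of
 * 0 hides the pointer via the last dpy_mouse_set() argument.
 */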
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}
static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}
static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}
static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}
static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }

    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}
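
/*
 * Every control command gets exactly one response, written into the
 * request's own "in" descriptors on the same virtqueue.  If the guest
 * set VIRTIO_GPU_FLAG_FENCE on the request, fence_id and ctx_id are
 * echoed back so the guest can match the completion to its fence.
 */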
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}
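
/*
 * The two halves of the switch above differ only by byte order:
 * virtio-gpu format names list the bytes as they sit in memory, while
 * pixman format names read components from the most significant bits
 * of a host-endian 32-bit word.  On a little-endian host the byte
 * sequence B,G,R,X loads as the word 0xXXRRGGBB, which pixman calls
 * x8r8g8b8; on a big-endian host the same bytes load as 0xBBGGRRXX,
 * pixman's b8g8r8x8.  A return value of 0 means "unsupported format"
 * and makes the callers fail the command with an error.
 */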
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height;
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
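
/*
 * Resource pixels live host-side in a pixman image; the guest only
 * ever refers to resources by id.  The running g->hostmem counter
 * enforces the "max_hostmem" property across all live resources, so a
 * guest cannot drive QEMU's memory use arbitrarily high by creating
 * huge 2D resources: once the budget is exceeded the allocation is
 * refused with VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY.
 */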
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
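
/*
 * Two copy strategies above: when the transfer starts at offset 0 and
 * covers full-width scanlines, guest layout and host image layout
 * match byte for byte and one big iov_to_buf() suffices.  Otherwise
 * each scanline of the target rectangle is copied separately, mapping
 * the guest-side offset to the matching (x, y) position; note the
 * guest data is assumed to use the host image's stride.
 */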
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
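
/*
 * One resource may back several scanouts, so the flushed rectangle is
 * intersected with each enabled scanout's rectangle and translated
 * into that scanout's coordinate space; only the bounding box of the
 * intersection is sent to dpy_gfx_update().  Scanouts whose bit is
 * clear in res->scanout_bitmask are skipped entirely.
 */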
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
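
/*
 * Scanout surfaces are zero-copy: the DisplaySurface wraps a pixman
 * sub-image that points straight into the resource's backing store at
 * the (x, y) offset of the scanout rectangle.  The wrapper holds an
 * extra reference on res->image and drops it via
 * virtio_unref_resource() when the surface goes away, so the pixels
 * stay valid even if the guest destroys the resource while it is
 * still on screen.
 */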
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}
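
/*
 * A resource's backing store is a list of guest-physical extents.
 * Each extent is mapped into QEMU's address space with
 * cpu_physical_memory_map() and recorded as one iovec, so transfers
 * can read guest memory directly via iov_to_buf().  The 16384-entry
 * cap bounds the allocation a malicious guest can trigger, and a
 * partially successful mapping is rolled back before returning -1.
 */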
void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}
static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}
static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}
static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}
static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}
static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}
const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
*f
, void *opaque
, size_t size
,
1007 VMStateField
*field
, QJSON
*vmdesc
)
1009 VirtIOGPU
*g
= opaque
;
1010 struct virtio_gpu_simple_resource
*res
;
1013 /* in 2d mode we should never find unprocessed commands here */
1014 assert(QTAILQ_EMPTY(&g
->cmdq
));
1016 QTAILQ_FOREACH(res
, &g
->reslist
, next
) {
1017 qemu_put_be32(f
, res
->resource_id
);
1018 qemu_put_be32(f
, res
->width
);
1019 qemu_put_be32(f
, res
->height
);
1020 qemu_put_be32(f
, res
->format
);
1021 qemu_put_be32(f
, res
->iov_cnt
);
1022 for (i
= 0; i
< res
->iov_cnt
; i
++) {
1023 qemu_put_be64(f
, res
->addrs
[i
]);
1024 qemu_put_be32(f
, res
->iov
[i
].iov_len
);
1026 qemu_put_buffer(f
, (void *)pixman_image_get_data(res
->image
),
1027 pixman_image_get_stride(res
->image
) * res
->height
);
1029 qemu_put_be32(f
, 0); /* end of list */
1031 vmstate_save_state(f
, &vmstate_virtio_gpu_scanouts
, g
, NULL
);
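
/*
 * Stream layout produced above: one record per live resource
 * (resource_id, width, height, format, iov_cnt, then each entry's
 * guest address and length, then the raw pixel data), terminated by a
 * resource_id of 0, followed by the scanout vmstate.
 * virtio_gpu_load() below recreates the pixman images, remaps the
 * guest backing store and restores the scanout surfaces from exactly
 * this stream.
 */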
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = PIXMAN_FORMAT_BPP(pformat) * res->width * res->height;

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        update_cursor(g, &scanout->cursor);
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker);
    }
}
static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}
static void virtio_gpu_instance_init(Object *obj)
{
}
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}
/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.
 * Instead the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem,
                     256 * 1024 * 1024),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}
static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};
static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)
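
/*
 * Compile-time checks that the structs shared with the guest keep the
 * exact sizes fixed by the virtio-gpu ABI: any padding or field
 * change that altered the wire layout would break the build here
 * instead of silently corrupting guest/host communication.
 */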
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);