/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

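/*
 * The virtio-gpu wire format is little-endian.  The helpers below
 * byte-swap command headers and command structs to host byte order in
 * place; on little-endian hosts they compile to no-ops.
 */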
static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

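/*
 * VIRGL() dispatches a command to either the virgl (3D) handler or the
 * simple 2D handler, depending on whether the virgl renderer is active.
 * Without CONFIG_VIRGL it always expands to the 2D path.
 */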
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

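/*
 * Cursor image helpers: copy the cursor pixels out of the backing
 * resource (2D) or out of the virgl renderer (3D) into the QEMU
 * cursor.  The copy is skipped when the resource size does not match
 * the currently allocated cursor.
 */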
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

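/*
 * Handles both VIRTIO_GPU_CMD_UPDATE_CURSOR and _MOVE_CURSOR: a move
 * only repositions the pointer, an update also refreshes the cursor
 * image and hot spot from the referenced resource.
 */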
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

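/*
 * Copy a response into the request's in-sg and push it onto the
 * virtqueue.  Fence id and context id are propagated into the response
 * when the request carried VIRTIO_GPU_FLAG_FENCE.
 */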
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

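/*
 * Map a virtio-gpu resource format to the pixman format with the same
 * byte layout; returns 0 for formats the host cannot handle.
 */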
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

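/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for
 * the resource, after validating the resource id and format and
 * checking the allocation against the max_hostmem accounting limit.
 */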
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width  ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

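/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy data from the guest backing
 * store into the host pixman image.  A full-width transfer starting at
 * offset 0 is done with a single copy; partial rectangles are copied
 * line by line.
 */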
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

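/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: intersect the flushed rectangle with
 * every scanout that shows this resource and update the affected area
 * of the corresponding console.
 */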
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

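/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a rectangle of a resource to a
 * scanout.  The display surface shares the resource's pixman data, so
 * a new surface is only created when the backing pointer or geometry
 * actually changes.
 */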
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

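/*
 * Read the virtio_gpu_mem_entry array that follows the attach_backing
 * command and DMA-map each entry into an iovec.  On failure, all
 * mappings created so far are torn down again.
 */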
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

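/*
 * Migration: the per-scanout state below is handled by regular vmstate
 * descriptions; resources are serialized by hand in virtio_gpu_save()
 * and virtio_gpu_load() further down.
 */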
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     res->iov[i].iov_len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     res->iov[i].iov_len);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

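/*
 * Realize: validate the configuration, decide whether the virgl
 * renderer can be used, set up the virtio queues and bottom halves,
 * and create one QEMU console per scanout.
 */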
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in docs/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.
 * Instead the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);