/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);
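
/*
 * Copy the pixel data for a cursor out of the resource that backs it into
 * the scanout's current_cursor. Blob resources are read directly from the
 * blob bytes, everything else from the pixman image; undersized or
 * mismatched backing stores are silently ignored.
 */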
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
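
/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR: a
 * move only repositions the pointer, while an update additionally
 * (re)defines the cursor image from the referenced resource.
 */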
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
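
/*
 * Like virtio_gpu_find_resource(), but logs a guest error and reports a
 * virtio-gpu error code through *error when the lookup fails. With
 * require_backing set it also insists on attached backing storage (an iov
 * plus either a pixman image or a blob).
 */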
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
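
/*
 * Write a response into the command's in-iovec and push it onto the
 * control virtqueue. If the request was fenced, the fence id and context
 * are copied into the response so the guest can match it up.
 */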
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
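
/*
 * Worked example: a 1024x768 resource in a 32 bpp format yields
 * stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes and therefore
 * hostmem = 768 * 4096 = 3 MiB.
 */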
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif
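
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman image
 * for the resource. The pixel memory is accounted against the
 * "max_hostmem" property; if the new image would exceed the limit, no
 * allocation is attempted and the command fails with OUT_OF_MEMORY.
 */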
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image,
                                              win32_pixman_image_destroy,
                                              res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
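
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: blob resources have no pixman
 * image; they are backed directly by guest memory entries mapped with
 * virtio_gpu_create_mapping_iov() and exported as a udmabuf where
 * supported.
 */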
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
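
/* Stop scanning out a resource and blank the corresponding console. */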
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}
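
/*
 * Tear down a resource: disable any scanouts still showing it, drop the
 * pixman image and guest mappings, and remove it from the resource list
 * and the hostmem accounting.
 */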
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res,
                                        Error **errp)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /*
     * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp
     * to ignore them.
     */
    virtio_gpu_resource_destroy(g, res, NULL);
}
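
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy guest backing store into the
 * host pixman image. Full-width rectangles are copied in one go; partial
 * widths fall back to a per-scanline copy.
 */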
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}
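
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: for blob resources shown on a GL-capable
 * console the flush becomes a dmabuf update; otherwise the flush rectangle
 * is intersected with every scanout showing the resource and the
 * overlapping area is sent to the display with dpy_gfx_update().
 */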
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
    scanout->fb = *fb;
}
static bool virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return false;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
                return false;
            }
            return true;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle,
                                             fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
    return true;
}
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
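
/*
 * Read the virtio_gpu_mem_entry array that follows a command and map each
 * guest range with dma_memory_map(), building host iovecs. The iov (and
 * optional addr) arrays grow in chunks of 16 entries; on any mapping
 * failure everything mapped so far is unwound and -1 is returned.
 */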
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}
void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
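
/*
 * The simple (2D) command dispatcher: decode the header, run the handler
 * for the command type and, unless the handler already responded or the
 * renderer is blocked, complete the command with NODATA or its error code.
 */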
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
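
/*
 * Drain the queued control commands. processing_cmdq guards against
 * re-entry and draining pauses while the renderer is blocked; commands a
 * handler left unfinished are parked on fenceq and counted as inflight.
 */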
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
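
/*
 * Migration state for one scanout. The framebuffer fields (fb.*) were
 * added in version 2 of this subsection; version 1 streams predate them.
 */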
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_UINT32_V(fb.format, struct virtio_gpu_scanout, 2),
        VMSTATE_UINT32_V(fb.bytes_pp, struct virtio_gpu_scanout, 2),
        VMSTATE_UINT32_V(fb.width, struct virtio_gpu_scanout, 2),
        VMSTATE_UINT32_V(fb.height, struct virtio_gpu_scanout, 2),
        VMSTATE_UINT32_V(fb.stride, struct virtio_gpu_scanout, 2),
        VMSTATE_UINT32_V(fb.offset, struct virtio_gpu_scanout, 2),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
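
/*
 * Save the 2D (non-blob) resources: metadata, the guest mapping table and
 * the raw pixel contents, terminated by a zero resource id, followed by
 * the scanout vmstate.
 */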
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            res->width, res->height,
            bits, res->height ? res->hostmem / res->height : 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }
#ifdef WIN32
        pixman_image_set_destroy_function(res->image,
                                          win32_pixman_image_destroy,
                                          res->handle);
#endif

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}
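
/*
 * Blob resources carry no host-side pixel data, so only their metadata
 * and guest mapping table are migrated; these callbacks back the
 * "virtio-gpu/blob" subsection below.
 */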
static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        assert(!res->image);
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}
static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}
static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }

        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }

        if (scanout->fb.format != 0) {
            uint32_t error = 0;
            struct virtio_gpu_rect r = {
                .x = scanout->x,
                .y = scanout->y,
                .width = scanout->width,
                .height = scanout->height,
            };

            if (!virtio_gpu_do_set_scanout(g, i, &scanout->fb, res, &r, &error)) {
                return -EINVAL;
            }
        } else {
            /* legacy v1 migration support */
            if (!res->image) {
                return -EINVAL;
            }
            scanout->ds = qemu_create_displaysurface_pixman(res->image);
#ifdef WIN32
            qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif
            dpy_gfx_replace_surface(scanout->con, scanout->ds);
        }

        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g);
    g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}
static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}
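
/*
 * Device reset runs in a bottom half so resource destruction happens in
 * main-loop context; virtio_gpu_reset() schedules it from the vCPU thread
 * and waits on reset_cond until it has finished.
 */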
static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    struct virtio_gpu_simple_resource *res, *tmp;
    uint32_t resource_id;
    Error *local_err = NULL;
    int i;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        resource_id = res->resource_id;
        vgc->resource_destroy(g, res, &local_err);
        if (local_err) {
            error_append_hint(&local_err, "%s: %s resource_destroy"
                              " for resource_id = %"PRIu32" failed.\n",
                              __func__, object_get_typename(OBJECT(g)),
                              resource_id);
            /* error_report_err frees the error object for us */
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        aio_bh_call(g->reset_bh);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}
static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}
const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu/blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};
static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgc->resource_destroy = virtio_gpu_resource_destroy;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}
= {
1689 .name
= TYPE_VIRTIO_GPU
,
1690 .parent
= TYPE_VIRTIO_GPU_BASE
,
1691 .instance_size
= sizeof(VirtIOGPU
),
1692 .class_size
= sizeof(VirtIOGPUClass
),
1693 .class_init
= virtio_gpu_class_init
,
1695 module_obj(TYPE_VIRTIO_GPU
);
1696 module_kconfig(VIRTIO_GPU
);
1698 static void virtio_register_types(void)
1700 type_register_static(&virtio_gpu_info
);
1703 type_init(virtio_register_types
)