/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "qemu/log.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#define VIRTIO_GPU_VM_VERSION 1
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
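
/*
 * Copy cursor pixel data out of a guest resource into the scanout's
 * host-side cursor image.  The resource may be a regular 2d resource
 * (backed by a pixman image) or a blob resource; either way its size
 * must match the cursor, which cursor_alloc() creates at a fixed 64x64.
 */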
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
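
/*
 * VIRTIO_GPU_CMD_MOVE_CURSOR only repositions the pointer;
 * VIRTIO_GPU_CMD_UPDATE_CURSOR additionally re-reads the cursor image
 * from the referenced resource and redefines it on the console.
 */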
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }

    s->cursor.pos.x = cursor->pos.x;
    s->cursor.pos.y = cursor->pos.y;
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (res == NULL) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
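
/*
 * pixman aligns each scanline to a 32-bit word.  Worked example
 * (illustrative numbers): a 1280-pixel-wide, 32 bpp image gives
 * stride = ((1280 * 32 + 0x1f) >> 5) * 4 = 5120 bytes, so an 800-line
 * image accounts for 800 * 5120 = 4096000 bytes of host memory.
 */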
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
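
/*
 * Copy data from the guest-supplied backing iov into the host pixman
 * image.  When the transfer starts at offset 0 with no x/y displacement
 * and covers the full image width, the copy collapses into one big
 * iov_to_buf(); otherwise it falls back to one copy per scanline of the
 * target rectangle.
 */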
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
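
/*
 * For blob resources shown on a GL-capable console the flush is handed
 * to the display backend as a whole-scanout GL update.  For everything
 * else the flush rectangle is intersected with each scanout showing the
 * resource and only the resulting extents are sent to the console.
 */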
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                console_has_gl(scanout->con)) {
                dpy_gl_update(scanout->con, 0, 0, scanout->width,
                              scanout->height);
            }
        }
        return;
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
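
/*
 * Validate the scanout rectangle against the framebuffer, then (re)create
 * the display surface when the backing pointer or geometry changed.  The
 * surface wraps the resource's pixman image; taking an extra reference
 * with a destroy callback keeps the image alive while the surface uses it.
 */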
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
                return;
            }
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
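
/*
 * Translate a guest scatter list (a virtio_gpu_mem_entry array read from
 * the command payload) into host iovecs via the DMA API.  A single guest
 * entry may produce several iovecs when dma_memory_map() returns a shorter
 * mapping than requested, hence the inner loop; the iov/addr arrays grow
 * in chunks of 16.  nr_entries is capped at 16384 to bound
 * guest-controlled allocation.
 */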
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
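
/*
 * Central dispatch for the 2d (non-virgl) command path.  Handlers record
 * failures in cmd->error; unless a handler already produced a response,
 * a NODATA (or error) reply is sent here.  When the renderer is blocked
 * the command stays unfinished and is answered later, once the display
 * backend flushes (see virtio_gpu_handle_gl_flushed).
 */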
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}
static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
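
/*
 * Drain the command queue until it is empty or the renderer blocks.
 * Commands still unfinished after processing are parked on the fence
 * queue and counted as in-flight (optionally reported on stderr when
 * the stats option is enabled).
 */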
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}
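
/*
 * Virtqueue kick handler, invoked via the ctrl bottom half: pop every
 * available element into the command queue first, then process the queue
 * in order, so queue draining is decoupled from guest notifications.
 */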
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
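
/* Per-scanout migration state: geometry plus the last cursor update. */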
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}
static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}
/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};
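
/*
 * Example (hypothetical command line; the concrete device name depends on
 * the transport in use, e.g. virtio-gpu-pci):
 *
 *   qemu-system-x86_64 -device virtio-gpu-pci,max_hostmem=512M,blob=on
 *
 * "max_hostmem" bounds the pixman-backed resource memory accounted in
 * g->hostmem; "blob" requires udmabuf support on the host (see
 * virtio_gpu_device_realize above).
 */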
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}
static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)