/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

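/*
 * Cursor-data helpers: fill the scanout's QEMU cursor with the pixels
 * backing a cursor resource.  The "simple" variant below reads the 2D
 * resource's pixman image directly; with CONFIG_VIRGL the "virgl"
 * variant asks virglrenderer for the cursor data instead.
 */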
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

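/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR:
 * a move only repositions the pointer, while an update also refreshes
 * the 64x64 cursor image from the referenced resource.
 */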
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

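/*
 * Complete a control request: when the guest requested a fence, echo
 * the fence and context IDs back in the response header, then copy the
 * response into the request's in-sg buffers, push the element and
 * notify the guest.
 */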
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

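/*
 * Worked example of the stride rounding below: a 5-pixel-wide row at
 * 24 bpp is 120 bits; rounded up to whole 32-bit words that is 4 words,
 * so the stride is 16 bytes rather than the raw 15.
 */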
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /*
     * Copied from pixman/pixman-bits-image.c, skipping the integer
     * overflow check; pixman_image_create_bits will fail if it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

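/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for
 * the guest resource.  The allocation is refused with OUT_OF_MEMORY
 * once the accumulated hostmem of all resources would exceed the
 * "max_hostmem" property, so a guest cannot pin unbounded host memory.
 */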
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

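/*
 * Tear down a resource: detach it from any scanouts still referencing
 * it, drop the pixman image and guest backing mappings, and return its
 * hostmem accounting.
 */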
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

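/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest's
 * backing pages into the host pixman image.  A full-width transfer
 * starting at offset 0 takes the single-copy fast path; anything else
 * is copied line by line into the destination rectangle.
 */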
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

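/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a rectangle of a resource to a
 * display.  The DisplaySurface is a zero-copy view onto the resource's
 * pixman image; an extra image reference with a destroy callback keeps
 * the backing storage alive for as long as the surface uses it.
 */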
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

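/*
 * Map the guest's scatter-gather list of backing pages into an iovec of
 * host pointers via dma_memory_map().  The mapping fails if any entry
 * cannot be mapped in one piece; the 16384-entry cap bounds the size of
 * the guest-controlled allocations.
 */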
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

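/*
 * Drain the queued control commands.  Processing stops while the
 * renderer is blocked; commands that do not complete synchronously
 * (fenced commands in virgl mode) are parked on fenceq and finished
 * later from the fence-poll path.
 */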
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);

#ifdef CONFIG_VIRGL
    if (g->renderer_reset) {
        g->renderer_reset = false;
        virtio_gpu_virgl_reset(g);
    }
#endif
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

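/*
 * Migration: virtio_gpu_save()/virtio_gpu_load() stream the resource
 * list by hand (id, geometry, format, backing-entry addresses and pixel
 * contents, terminated by a zero id), followed by the scanout state via
 * vmstate_virtio_gpu_scanouts.
 */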
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

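/*
 * Realize: decide whether virgl can be used (requires CONFIG_VIRGL, a
 * non-big-endian host and an OpenGL-capable display), then create the
 * control and cursor virtqueues together with their bottom halves.
 */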
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;

#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.  Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = virtio_gpu_gl_unblock;
    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)