/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
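
/*
 * Dispatch a command either to the virglrenderer backend or to the
 * simple 2D backend, depending on how the device was configured.
 */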
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif
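
/*
 * Fill the scanout's cursor from the pixel data of a 2D resource; a
 * missing resource or a size mismatch leaves the cursor untouched.
 */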
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif
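
/*
 * Process an UPDATE_CURSOR/MOVE_CURSOR request from the cursor queue.
 * A move only repositions the pointer; an update additionally reloads
 * the cursor image from the referenced resource and redefines it.
 */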
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
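
/*
 * Send @resp back to the guest for @cmd.  If the request carried the
 * FENCE flag, the fence and context ids are copied into the response
 * header before it is pushed onto the virtqueue.
 */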
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
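
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman image
 * backing a guest resource.  The allocation is rejected when it would
 * push the total resource memory past the max_hostmem limit.
 */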
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
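
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest's
 * backing iovecs into the host pixman image.  A transfer of the full
 * image width starting at offset 0 is done as one bulk copy; partial
 * rectangles are copied line by line.
 */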
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
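
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: push a dirty rectangle out to every
 * scanout the resource is bound to, intersected with and translated
 * into each scanout's coordinate space.
 */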
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
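
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a rectangle of a resource to a
 * scanout.  The new display surface aliases the resource's pixman
 * bits, so an extra image reference is held and released through the
 * surface's destroy callback.
 */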
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
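
/*
 * Translate the guest memory entries of an ATTACH_BACKING command into
 * host iovecs via dma_memory_map().  On any mapping failure all maps
 * created so far are released and -1 is returned; the 16384-entry cap
 * bounds guest-controlled allocation sizes.
 */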
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        (*iov)[i].iov_len = len;
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            if ((*iov)[i].iov_base) {
                i++; /* cleanup the 'i'th map */
            }
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
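
/*
 * Decode one control-queue command and dispatch it to the 2D handler
 * functions above.  Any command that has not already sent a response
 * gets a nodata reply carrying either the recorded error or OK.
 */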
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
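
/*
 * Drain queued control commands.  Processing stops while the renderer
 * is blocked; commands that complete asynchronously (fenced commands)
 * are moved to fenceq and counted as in flight.
 */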
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);

#ifdef CONFIG_VIRGL
    if (g->renderer_reset) {
        g->renderer_reset = false;
        virtio_gpu_virgl_reset(g);
    }
#endif
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
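
/*
 * Migration: vmstate for one scanout and for the scanout array.  The
 * resource list itself is streamed by virtio_gpu_save()/_load() below.
 */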
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
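
/*
 * Stream every resource (id, geometry, format, backing entries, raw
 * pixel data), terminated by a zero id, then the scanout vmstate.
 */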
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
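
/*
 * Counterpart of virtio_gpu_save(): recreate each resource, restore its
 * pixel data and guest DMA mappings, then reapply the scanout state.
 */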
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;

#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}
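
/*
 * Device reset: destroy all resources and drop any queued or in-flight
 * commands, then reset the common base state.
 */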
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.  Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_flushed = virtio_gpu_gl_flushed;
    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)