/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

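/*
 * Note: VIRGL() picks the renderer at runtime.  An illustrative
 * expansion, taken from virtio_gpu_process_cmdq() below:
 *
 *     VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
 *           g, cmd);
 *
 * calls virtio_gpu_virgl_process_cmd() when use_virgl_renderer is set in
 * a virgl-enabled build, and virtio_gpu_simple_process_cmd() otherwise.
 */
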
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

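/*
 * Complete @cmd by writing @resp into the request's in-sg.  For fenced
 * requests the fence id and context id are copied back into the response
 * header so the guest can match the completion to its fence.
 */
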
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

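/*
 * Worked example (illustrative): a 1024x768 resource in a 32 bpp format
 * yields stride = ((1024 * 32 + 0x1f) >> 5) * sizeof(uint32_t) = 4096
 * bytes, so the host memory charged is 768 * 4096 = 3 MiB.
 */
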
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

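/*
 * Guest-requested 2D resources are backed by host pixman images.  Their
 * pixel data is accounted against the "max_hostmem" property; creation
 * fails with OUT_OF_MEMORY once that budget would be exceeded.
 */
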
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

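/*
 * A flush clips the flushed rectangle against every scanout that shows
 * the resource and updates only the intersecting area of each console,
 * translated into that scanout's coordinate space.
 */
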
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

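/*
 * The display surface created below aliases the resource's pixel data
 * rather than copying it; the extra reference taken on res->image (and
 * dropped again by virtio_unref_resource) keeps the backing store alive
 * for as long as the surface uses it.
 */
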
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

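/*
 * Translate the guest's backing entries, (address, length) pairs read
 * from the request payload, into host iovecs via dma_memory_map().  The
 * entry count is capped at 16384 to bound the allocations; on a partial
 * mapping everything mapped so far is torn down again.
 */
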
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        (*iov)[i].iov_len = len;
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            if ((*iov)[i].iov_base) {
                i++; /* cleanup the 'i'th map */
            }
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

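/*
 * Drain the queued control commands.  Processing pauses while the
 * renderer is blocked.  Commands that do not finish synchronously
 * (typically fenced virgl commands) stay on fenceq and are counted as
 * in-flight until their completion arrives.
 */
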
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);

#ifdef CONFIG_VIRGL
    if (g->renderer_reset) {
        g->renderer_reset = false;
        virtio_gpu_virgl_reset(g);
    }
#endif
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

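/*
 * Migration stream layout produced below: for each resource a be32
 * resource id (0 terminates the list), be32 width/height/format, be32
 * iov_cnt, then per iov entry a be64 guest address and be32 length,
 * then the raw image data (stride * height bytes).  The scanout state
 * follows via vmstate_virtio_gpu_scanouts.
 */
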
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

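/*
 * Virgl is only offered when it was compiled in, the host is not
 * big-endian and OpenGL display support is available; otherwise the
 * "virgl" flag is silently cleared before the base device is realized.
 */
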
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;

#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = virtio_gpu_gl_unblock;
    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)