/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/log.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/error.h"
25 static struct virtio_gpu_simple_resource
*
26 virtio_gpu_find_resource(VirtIOGPU
*g
, uint32_t resource_id
);
#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
/*
 * Dispatch helper: route a call to the virgl (3D) implementation when the
 * virgl renderer is active, otherwise to the simple 2D implementation.
 * Wrapped in do/while(0) so it behaves as a single statement.
 */
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
/* Without virgl support, always use the simple 2D implementation. */
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif
45 static void update_cursor_data_simple(VirtIOGPU
*g
,
46 struct virtio_gpu_scanout
*s
,
49 struct virtio_gpu_simple_resource
*res
;
52 res
= virtio_gpu_find_resource(g
, resource_id
);
57 if (pixman_image_get_width(res
->image
) != s
->current_cursor
->width
||
58 pixman_image_get_height(res
->image
) != s
->current_cursor
->height
) {
62 pixels
= s
->current_cursor
->width
* s
->current_cursor
->height
;
63 memcpy(s
->current_cursor
->data
,
64 pixman_image_get_data(res
->image
),
65 pixels
* sizeof(uint32_t));
#ifdef CONFIG_VIRGL

/*
 * Fetch cursor pixel data from the virgl renderer and copy it into the
 * scanout's current_cursor.  The renderer returns a malloc'ed buffer which
 * must be freed on every exit path once obtained.
 */
static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        /* size mismatch: keep old cursor contents, but don't leak */
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif
94 static void update_cursor(VirtIOGPU
*g
, struct virtio_gpu_update_cursor
*cursor
)
96 struct virtio_gpu_scanout
*s
;
97 bool move
= cursor
->hdr
.type
!= VIRTIO_GPU_CMD_MOVE_CURSOR
;
99 if (cursor
->pos
.scanout_id
>= g
->conf
.max_outputs
) {
102 s
= &g
->scanout
[cursor
->pos
.scanout_id
];
104 trace_virtio_gpu_update_cursor(cursor
->pos
.scanout_id
,
107 move
? "move" : "update",
108 cursor
->resource_id
);
111 if (!s
->current_cursor
) {
112 s
->current_cursor
= cursor_alloc(64, 64);
115 s
->current_cursor
->hot_x
= cursor
->hot_x
;
116 s
->current_cursor
->hot_y
= cursor
->hot_y
;
118 if (cursor
->resource_id
> 0) {
119 VIRGL(g
, update_cursor_data_virgl
, update_cursor_data_simple
,
120 g
, s
, cursor
->resource_id
);
122 dpy_cursor_define(s
->con
, s
->current_cursor
);
124 dpy_mouse_set(s
->con
, cursor
->pos
.x
, cursor
->pos
.y
,
125 cursor
->resource_id
? 1 : 0);
128 static void virtio_gpu_get_config(VirtIODevice
*vdev
, uint8_t *config
)
130 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
131 memcpy(config
, &g
->virtio_config
, sizeof(g
->virtio_config
));
134 static void virtio_gpu_set_config(VirtIODevice
*vdev
, const uint8_t *config
)
136 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
137 struct virtio_gpu_config vgconfig
;
139 memcpy(&vgconfig
, config
, sizeof(g
->virtio_config
));
141 if (vgconfig
.events_clear
) {
142 g
->virtio_config
.events_read
&= ~vgconfig
.events_clear
;
146 static uint64_t virtio_gpu_get_features(VirtIODevice
*vdev
, uint64_t features
,
149 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
151 if (virtio_gpu_virgl_enabled(g
->conf
)) {
152 features
|= (1 << VIRTIO_GPU_F_VIRGL
);
157 static void virtio_gpu_set_features(VirtIODevice
*vdev
, uint64_t features
)
159 static const uint32_t virgl
= (1 << VIRTIO_GPU_F_VIRGL
);
160 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
162 g
->use_virgl_renderer
= ((features
& virgl
) == virgl
);
163 trace_virtio_gpu_features(g
->use_virgl_renderer
);
166 static void virtio_gpu_notify_event(VirtIOGPU
*g
, uint32_t event_type
)
168 g
->virtio_config
.events_read
|= event_type
;
169 virtio_notify_config(&g
->parent_obj
);
172 static struct virtio_gpu_simple_resource
*
173 virtio_gpu_find_resource(VirtIOGPU
*g
, uint32_t resource_id
)
175 struct virtio_gpu_simple_resource
*res
;
177 QTAILQ_FOREACH(res
, &g
->reslist
, next
) {
178 if (res
->resource_id
== resource_id
) {
185 void virtio_gpu_ctrl_response(VirtIOGPU
*g
,
186 struct virtio_gpu_ctrl_command
*cmd
,
187 struct virtio_gpu_ctrl_hdr
*resp
,
192 if (cmd
->cmd_hdr
.flags
& VIRTIO_GPU_FLAG_FENCE
) {
193 resp
->flags
|= VIRTIO_GPU_FLAG_FENCE
;
194 resp
->fence_id
= cmd
->cmd_hdr
.fence_id
;
195 resp
->ctx_id
= cmd
->cmd_hdr
.ctx_id
;
197 s
= iov_from_buf(cmd
->elem
.in_sg
, cmd
->elem
.in_num
, 0, resp
, resp_len
);
199 qemu_log_mask(LOG_GUEST_ERROR
,
200 "%s: response size incorrect %zu vs %zu\n",
201 __func__
, s
, resp_len
);
203 virtqueue_push(cmd
->vq
, &cmd
->elem
, s
);
204 virtio_notify(VIRTIO_DEVICE(g
), cmd
->vq
);
205 cmd
->finished
= true;
208 void virtio_gpu_ctrl_response_nodata(VirtIOGPU
*g
,
209 struct virtio_gpu_ctrl_command
*cmd
,
210 enum virtio_gpu_ctrl_type type
)
212 struct virtio_gpu_ctrl_hdr resp
;
214 memset(&resp
, 0, sizeof(resp
));
216 virtio_gpu_ctrl_response(g
, cmd
, &resp
, sizeof(resp
));
220 virtio_gpu_fill_display_info(VirtIOGPU
*g
,
221 struct virtio_gpu_resp_display_info
*dpy_info
)
225 for (i
= 0; i
< g
->conf
.max_outputs
; i
++) {
226 if (g
->enabled_output_bitmask
& (1 << i
)) {
227 dpy_info
->pmodes
[i
].enabled
= 1;
228 dpy_info
->pmodes
[i
].r
.width
= g
->req_state
[i
].width
;
229 dpy_info
->pmodes
[i
].r
.height
= g
->req_state
[i
].height
;
234 void virtio_gpu_get_display_info(VirtIOGPU
*g
,
235 struct virtio_gpu_ctrl_command
*cmd
)
237 struct virtio_gpu_resp_display_info display_info
;
239 trace_virtio_gpu_cmd_get_display_info();
240 memset(&display_info
, 0, sizeof(display_info
));
241 display_info
.hdr
.type
= VIRTIO_GPU_RESP_OK_DISPLAY_INFO
;
242 virtio_gpu_fill_display_info(g
, &display_info
);
243 virtio_gpu_ctrl_response(g
, cmd
, &display_info
.hdr
,
244 sizeof(display_info
));
247 static pixman_format_code_t
get_pixman_format(uint32_t virtio_gpu_format
)
249 switch (virtio_gpu_format
) {
250 #ifdef HOST_WORDS_BIGENDIAN
251 case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM
:
252 return PIXMAN_b8g8r8x8
;
253 case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
:
254 return PIXMAN_b8g8r8a8
;
255 case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM
:
256 return PIXMAN_x8r8g8b8
;
257 case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM
:
258 return PIXMAN_a8r8g8b8
;
259 case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM
:
260 return PIXMAN_r8g8b8x8
;
261 case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM
:
262 return PIXMAN_r8g8b8a8
;
263 case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM
:
264 return PIXMAN_x8b8g8r8
;
265 case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM
:
266 return PIXMAN_a8b8g8r8
;
268 case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM
:
269 return PIXMAN_x8r8g8b8
;
270 case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
:
271 return PIXMAN_a8r8g8b8
;
272 case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM
:
273 return PIXMAN_b8g8r8x8
;
274 case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM
:
275 return PIXMAN_b8g8r8a8
;
276 case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM
:
277 return PIXMAN_x8b8g8r8
;
278 case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM
:
279 return PIXMAN_a8b8g8r8
;
280 case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM
:
281 return PIXMAN_r8g8b8x8
;
282 case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM
:
283 return PIXMAN_r8g8b8a8
;
290 static void virtio_gpu_resource_create_2d(VirtIOGPU
*g
,
291 struct virtio_gpu_ctrl_command
*cmd
)
293 pixman_format_code_t pformat
;
294 struct virtio_gpu_simple_resource
*res
;
295 struct virtio_gpu_resource_create_2d c2d
;
297 VIRTIO_GPU_FILL_CMD(c2d
);
298 trace_virtio_gpu_cmd_res_create_2d(c2d
.resource_id
, c2d
.format
,
299 c2d
.width
, c2d
.height
);
301 if (c2d
.resource_id
== 0) {
302 qemu_log_mask(LOG_GUEST_ERROR
, "%s: resource id 0 is not allowed\n",
304 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
308 res
= virtio_gpu_find_resource(g
, c2d
.resource_id
);
310 qemu_log_mask(LOG_GUEST_ERROR
, "%s: resource already exists %d\n",
311 __func__
, c2d
.resource_id
);
312 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
316 res
= g_new0(struct virtio_gpu_simple_resource
, 1);
318 res
->width
= c2d
.width
;
319 res
->height
= c2d
.height
;
320 res
->format
= c2d
.format
;
321 res
->resource_id
= c2d
.resource_id
;
323 pformat
= get_pixman_format(c2d
.format
);
325 qemu_log_mask(LOG_GUEST_ERROR
,
326 "%s: host couldn't handle guest format %d\n",
327 __func__
, c2d
.format
);
328 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
331 res
->image
= pixman_image_create_bits(pformat
,
337 qemu_log_mask(LOG_GUEST_ERROR
,
338 "%s: resource creation failed %d %d %d\n",
339 __func__
, c2d
.resource_id
, c2d
.width
, c2d
.height
);
341 cmd
->error
= VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY
;
345 QTAILQ_INSERT_HEAD(&g
->reslist
, res
, next
);
348 static void virtio_gpu_resource_destroy(VirtIOGPU
*g
,
349 struct virtio_gpu_simple_resource
*res
)
351 pixman_image_unref(res
->image
);
352 QTAILQ_REMOVE(&g
->reslist
, res
, next
);
356 static void virtio_gpu_resource_unref(VirtIOGPU
*g
,
357 struct virtio_gpu_ctrl_command
*cmd
)
359 struct virtio_gpu_simple_resource
*res
;
360 struct virtio_gpu_resource_unref unref
;
362 VIRTIO_GPU_FILL_CMD(unref
);
363 trace_virtio_gpu_cmd_res_unref(unref
.resource_id
);
365 res
= virtio_gpu_find_resource(g
, unref
.resource_id
);
367 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal resource specified %d\n",
368 __func__
, unref
.resource_id
);
369 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
372 virtio_gpu_resource_destroy(g
, res
);
375 static void virtio_gpu_transfer_to_host_2d(VirtIOGPU
*g
,
376 struct virtio_gpu_ctrl_command
*cmd
)
378 struct virtio_gpu_simple_resource
*res
;
380 uint32_t src_offset
, dst_offset
, stride
;
382 pixman_format_code_t format
;
383 struct virtio_gpu_transfer_to_host_2d t2d
;
385 VIRTIO_GPU_FILL_CMD(t2d
);
386 trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d
.resource_id
);
388 res
= virtio_gpu_find_resource(g
, t2d
.resource_id
);
389 if (!res
|| !res
->iov
) {
390 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal resource specified %d\n",
391 __func__
, t2d
.resource_id
);
392 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
396 if (t2d
.r
.x
> res
->width
||
397 t2d
.r
.y
> res
->height
||
398 t2d
.r
.width
> res
->width
||
399 t2d
.r
.height
> res
->height
||
400 t2d
.r
.x
+ t2d
.r
.width
> res
->width
||
401 t2d
.r
.y
+ t2d
.r
.height
> res
->height
) {
402 qemu_log_mask(LOG_GUEST_ERROR
, "%s: transfer bounds outside resource"
403 " bounds for resource %d: %d %d %d %d vs %d %d\n",
404 __func__
, t2d
.resource_id
, t2d
.r
.x
, t2d
.r
.y
,
405 t2d
.r
.width
, t2d
.r
.height
, res
->width
, res
->height
);
406 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
410 format
= pixman_image_get_format(res
->image
);
411 bpp
= (PIXMAN_FORMAT_BPP(format
) + 7) / 8;
412 stride
= pixman_image_get_stride(res
->image
);
414 if (t2d
.offset
|| t2d
.r
.x
|| t2d
.r
.y
||
415 t2d
.r
.width
!= pixman_image_get_width(res
->image
)) {
416 void *img_data
= pixman_image_get_data(res
->image
);
417 for (h
= 0; h
< t2d
.r
.height
; h
++) {
418 src_offset
= t2d
.offset
+ stride
* h
;
419 dst_offset
= (t2d
.r
.y
+ h
) * stride
+ (t2d
.r
.x
* bpp
);
421 iov_to_buf(res
->iov
, res
->iov_cnt
, src_offset
,
423 + dst_offset
, t2d
.r
.width
* bpp
);
426 iov_to_buf(res
->iov
, res
->iov_cnt
, 0,
427 pixman_image_get_data(res
->image
),
428 pixman_image_get_stride(res
->image
)
429 * pixman_image_get_height(res
->image
));
433 static void virtio_gpu_resource_flush(VirtIOGPU
*g
,
434 struct virtio_gpu_ctrl_command
*cmd
)
436 struct virtio_gpu_simple_resource
*res
;
437 struct virtio_gpu_resource_flush rf
;
438 pixman_region16_t flush_region
;
441 VIRTIO_GPU_FILL_CMD(rf
);
442 trace_virtio_gpu_cmd_res_flush(rf
.resource_id
,
443 rf
.r
.width
, rf
.r
.height
, rf
.r
.x
, rf
.r
.y
);
445 res
= virtio_gpu_find_resource(g
, rf
.resource_id
);
447 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal resource specified %d\n",
448 __func__
, rf
.resource_id
);
449 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
453 if (rf
.r
.x
> res
->width
||
454 rf
.r
.y
> res
->height
||
455 rf
.r
.width
> res
->width
||
456 rf
.r
.height
> res
->height
||
457 rf
.r
.x
+ rf
.r
.width
> res
->width
||
458 rf
.r
.y
+ rf
.r
.height
> res
->height
) {
459 qemu_log_mask(LOG_GUEST_ERROR
, "%s: flush bounds outside resource"
460 " bounds for resource %d: %d %d %d %d vs %d %d\n",
461 __func__
, rf
.resource_id
, rf
.r
.x
, rf
.r
.y
,
462 rf
.r
.width
, rf
.r
.height
, res
->width
, res
->height
);
463 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
467 pixman_region_init_rect(&flush_region
,
468 rf
.r
.x
, rf
.r
.y
, rf
.r
.width
, rf
.r
.height
);
469 for (i
= 0; i
< g
->conf
.max_outputs
; i
++) {
470 struct virtio_gpu_scanout
*scanout
;
471 pixman_region16_t region
, finalregion
;
472 pixman_box16_t
*extents
;
474 if (!(res
->scanout_bitmask
& (1 << i
))) {
477 scanout
= &g
->scanout
[i
];
479 pixman_region_init(&finalregion
);
480 pixman_region_init_rect(®ion
, scanout
->x
, scanout
->y
,
481 scanout
->width
, scanout
->height
);
483 pixman_region_intersect(&finalregion
, &flush_region
, ®ion
);
484 pixman_region_translate(&finalregion
, -scanout
->x
, -scanout
->y
);
485 extents
= pixman_region_extents(&finalregion
);
486 /* work out the area we need to update for each console */
487 dpy_gfx_update(g
->scanout
[i
].con
,
488 extents
->x1
, extents
->y1
,
489 extents
->x2
- extents
->x1
,
490 extents
->y2
- extents
->y1
);
492 pixman_region_fini(®ion
);
493 pixman_region_fini(&finalregion
);
495 pixman_region_fini(&flush_region
);
498 static void virtio_unref_resource(pixman_image_t
*image
, void *data
)
500 pixman_image_unref(data
);
503 static void virtio_gpu_set_scanout(VirtIOGPU
*g
,
504 struct virtio_gpu_ctrl_command
*cmd
)
506 struct virtio_gpu_simple_resource
*res
;
507 struct virtio_gpu_scanout
*scanout
;
508 pixman_format_code_t format
;
511 struct virtio_gpu_set_scanout ss
;
513 VIRTIO_GPU_FILL_CMD(ss
);
514 trace_virtio_gpu_cmd_set_scanout(ss
.scanout_id
, ss
.resource_id
,
515 ss
.r
.width
, ss
.r
.height
, ss
.r
.x
, ss
.r
.y
);
517 if (ss
.scanout_id
>= g
->conf
.max_outputs
) {
518 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal scanout id specified %d",
519 __func__
, ss
.scanout_id
);
520 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID
;
525 if (ss
.resource_id
== 0) {
526 scanout
= &g
->scanout
[ss
.scanout_id
];
527 if (scanout
->resource_id
) {
528 res
= virtio_gpu_find_resource(g
, scanout
->resource_id
);
530 res
->scanout_bitmask
&= ~(1 << ss
.scanout_id
);
533 if (ss
.scanout_id
== 0) {
534 qemu_log_mask(LOG_GUEST_ERROR
,
535 "%s: illegal scanout id specified %d",
536 __func__
, ss
.scanout_id
);
537 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID
;
540 dpy_gfx_replace_surface(g
->scanout
[ss
.scanout_id
].con
, NULL
);
547 /* create a surface for this scanout */
548 res
= virtio_gpu_find_resource(g
, ss
.resource_id
);
550 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal resource specified %d\n",
551 __func__
, ss
.resource_id
);
552 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
556 if (ss
.r
.x
> res
->width
||
557 ss
.r
.y
> res
->height
||
558 ss
.r
.width
> res
->width
||
559 ss
.r
.height
> res
->height
||
560 ss
.r
.x
+ ss
.r
.width
> res
->width
||
561 ss
.r
.y
+ ss
.r
.height
> res
->height
) {
562 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal scanout %d bounds for"
563 " resource %d, (%d,%d)+%d,%d vs %d %d\n",
564 __func__
, ss
.scanout_id
, ss
.resource_id
, ss
.r
.x
, ss
.r
.y
,
565 ss
.r
.width
, ss
.r
.height
, res
->width
, res
->height
);
566 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
570 scanout
= &g
->scanout
[ss
.scanout_id
];
572 format
= pixman_image_get_format(res
->image
);
573 bpp
= (PIXMAN_FORMAT_BPP(format
) + 7) / 8;
574 offset
= (ss
.r
.x
* bpp
) + ss
.r
.y
* pixman_image_get_stride(res
->image
);
575 if (!scanout
->ds
|| surface_data(scanout
->ds
)
576 != ((uint8_t *)pixman_image_get_data(res
->image
) + offset
) ||
577 scanout
->width
!= ss
.r
.width
||
578 scanout
->height
!= ss
.r
.height
) {
579 pixman_image_t
*rect
;
580 void *ptr
= (uint8_t *)pixman_image_get_data(res
->image
) + offset
;
581 rect
= pixman_image_create_bits(format
, ss
.r
.width
, ss
.r
.height
, ptr
,
582 pixman_image_get_stride(res
->image
));
583 pixman_image_ref(res
->image
);
584 pixman_image_set_destroy_function(rect
, virtio_unref_resource
,
586 /* realloc the surface ptr */
587 scanout
->ds
= qemu_create_displaysurface_pixman(rect
);
589 cmd
->error
= VIRTIO_GPU_RESP_ERR_UNSPEC
;
592 dpy_gfx_replace_surface(g
->scanout
[ss
.scanout_id
].con
, scanout
->ds
);
595 res
->scanout_bitmask
|= (1 << ss
.scanout_id
);
596 scanout
->resource_id
= ss
.resource_id
;
599 scanout
->width
= ss
.r
.width
;
600 scanout
->height
= ss
.r
.height
;
603 int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing
*ab
,
604 struct virtio_gpu_ctrl_command
*cmd
,
607 struct virtio_gpu_mem_entry
*ents
;
611 if (ab
->nr_entries
> 16384) {
612 qemu_log_mask(LOG_GUEST_ERROR
,
613 "%s: nr_entries is too big (%d > 16384)\n",
614 __func__
, ab
->nr_entries
);
618 esize
= sizeof(*ents
) * ab
->nr_entries
;
619 ents
= g_malloc(esize
);
620 s
= iov_to_buf(cmd
->elem
.out_sg
, cmd
->elem
.out_num
,
621 sizeof(*ab
), ents
, esize
);
623 qemu_log_mask(LOG_GUEST_ERROR
,
624 "%s: command data size incorrect %zu vs %zu\n",
630 *iov
= g_malloc0(sizeof(struct iovec
) * ab
->nr_entries
);
631 for (i
= 0; i
< ab
->nr_entries
; i
++) {
632 hwaddr len
= ents
[i
].length
;
633 (*iov
)[i
].iov_len
= ents
[i
].length
;
634 (*iov
)[i
].iov_base
= cpu_physical_memory_map(ents
[i
].addr
, &len
, 1);
635 if (!(*iov
)[i
].iov_base
|| len
!= ents
[i
].length
) {
636 qemu_log_mask(LOG_GUEST_ERROR
, "%s: failed to map MMIO memory for"
637 " resource %d element %d\n",
638 __func__
, ab
->resource_id
, i
);
639 virtio_gpu_cleanup_mapping_iov(*iov
, i
);
649 void virtio_gpu_cleanup_mapping_iov(struct iovec
*iov
, uint32_t count
)
653 for (i
= 0; i
< count
; i
++) {
654 cpu_physical_memory_unmap(iov
[i
].iov_base
, iov
[i
].iov_len
, 1,
660 static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource
*res
)
662 virtio_gpu_cleanup_mapping_iov(res
->iov
, res
->iov_cnt
);
668 virtio_gpu_resource_attach_backing(VirtIOGPU
*g
,
669 struct virtio_gpu_ctrl_command
*cmd
)
671 struct virtio_gpu_simple_resource
*res
;
672 struct virtio_gpu_resource_attach_backing ab
;
675 VIRTIO_GPU_FILL_CMD(ab
);
676 trace_virtio_gpu_cmd_res_back_attach(ab
.resource_id
);
678 res
= virtio_gpu_find_resource(g
, ab
.resource_id
);
680 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal resource specified %d\n",
681 __func__
, ab
.resource_id
);
682 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
686 ret
= virtio_gpu_create_mapping_iov(&ab
, cmd
, &res
->iov
);
688 cmd
->error
= VIRTIO_GPU_RESP_ERR_UNSPEC
;
692 res
->iov_cnt
= ab
.nr_entries
;
696 virtio_gpu_resource_detach_backing(VirtIOGPU
*g
,
697 struct virtio_gpu_ctrl_command
*cmd
)
699 struct virtio_gpu_simple_resource
*res
;
700 struct virtio_gpu_resource_detach_backing detach
;
702 VIRTIO_GPU_FILL_CMD(detach
);
703 trace_virtio_gpu_cmd_res_back_detach(detach
.resource_id
);
705 res
= virtio_gpu_find_resource(g
, detach
.resource_id
);
706 if (!res
|| !res
->iov
) {
707 qemu_log_mask(LOG_GUEST_ERROR
, "%s: illegal resource specified %d\n",
708 __func__
, detach
.resource_id
);
709 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
712 virtio_gpu_cleanup_mapping(res
);
715 static void virtio_gpu_simple_process_cmd(VirtIOGPU
*g
,
716 struct virtio_gpu_ctrl_command
*cmd
)
718 VIRTIO_GPU_FILL_CMD(cmd
->cmd_hdr
);
720 switch (cmd
->cmd_hdr
.type
) {
721 case VIRTIO_GPU_CMD_GET_DISPLAY_INFO
:
722 virtio_gpu_get_display_info(g
, cmd
);
724 case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
:
725 virtio_gpu_resource_create_2d(g
, cmd
);
727 case VIRTIO_GPU_CMD_RESOURCE_UNREF
:
728 virtio_gpu_resource_unref(g
, cmd
);
730 case VIRTIO_GPU_CMD_RESOURCE_FLUSH
:
731 virtio_gpu_resource_flush(g
, cmd
);
733 case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D
:
734 virtio_gpu_transfer_to_host_2d(g
, cmd
);
736 case VIRTIO_GPU_CMD_SET_SCANOUT
:
737 virtio_gpu_set_scanout(g
, cmd
);
739 case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
:
740 virtio_gpu_resource_attach_backing(g
, cmd
);
742 case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING
:
743 virtio_gpu_resource_detach_backing(g
, cmd
);
746 cmd
->error
= VIRTIO_GPU_RESP_ERR_UNSPEC
;
749 if (!cmd
->finished
) {
750 virtio_gpu_ctrl_response_nodata(g
, cmd
, cmd
->error
? cmd
->error
:
751 VIRTIO_GPU_RESP_OK_NODATA
);
755 static void virtio_gpu_handle_ctrl_cb(VirtIODevice
*vdev
, VirtQueue
*vq
)
757 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
758 qemu_bh_schedule(g
->ctrl_bh
);
761 static void virtio_gpu_handle_cursor_cb(VirtIODevice
*vdev
, VirtQueue
*vq
)
763 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
764 qemu_bh_schedule(g
->cursor_bh
);
767 void virtio_gpu_process_cmdq(VirtIOGPU
*g
)
769 struct virtio_gpu_ctrl_command
*cmd
;
771 while (!QTAILQ_EMPTY(&g
->cmdq
)) {
772 cmd
= QTAILQ_FIRST(&g
->cmdq
);
774 /* process command */
775 VIRGL(g
, virtio_gpu_virgl_process_cmd
, virtio_gpu_simple_process_cmd
,
780 QTAILQ_REMOVE(&g
->cmdq
, cmd
, next
);
781 if (virtio_gpu_stats_enabled(g
->conf
)) {
785 if (!cmd
->finished
) {
786 QTAILQ_INSERT_TAIL(&g
->fenceq
, cmd
, next
);
788 if (virtio_gpu_stats_enabled(g
->conf
)) {
789 if (g
->stats
.max_inflight
< g
->inflight
) {
790 g
->stats
.max_inflight
= g
->inflight
;
792 fprintf(stderr
, "inflight: %3d (+)\r", g
->inflight
);
800 static void virtio_gpu_handle_ctrl(VirtIODevice
*vdev
, VirtQueue
*vq
)
802 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
803 struct virtio_gpu_ctrl_command
*cmd
;
805 if (!virtio_queue_ready(vq
)) {
810 if (!g
->renderer_inited
&& g
->use_virgl_renderer
) {
811 virtio_gpu_virgl_init(g
);
812 g
->renderer_inited
= true;
816 cmd
= virtqueue_pop(vq
, sizeof(struct virtio_gpu_ctrl_command
));
820 cmd
->finished
= false;
821 cmd
->waiting
= false;
822 QTAILQ_INSERT_TAIL(&g
->cmdq
, cmd
, next
);
823 cmd
= virtqueue_pop(vq
, sizeof(struct virtio_gpu_ctrl_command
));
826 virtio_gpu_process_cmdq(g
);
829 if (g
->use_virgl_renderer
) {
830 virtio_gpu_virgl_fence_poll(g
);
835 static void virtio_gpu_ctrl_bh(void *opaque
)
837 VirtIOGPU
*g
= opaque
;
838 virtio_gpu_handle_ctrl(&g
->parent_obj
, g
->ctrl_vq
);
841 static void virtio_gpu_handle_cursor(VirtIODevice
*vdev
, VirtQueue
*vq
)
843 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
844 VirtQueueElement
*elem
;
846 struct virtio_gpu_update_cursor cursor_info
;
848 if (!virtio_queue_ready(vq
)) {
852 elem
= virtqueue_pop(vq
, sizeof(VirtQueueElement
));
857 s
= iov_to_buf(elem
->out_sg
, elem
->out_num
, 0,
858 &cursor_info
, sizeof(cursor_info
));
859 if (s
!= sizeof(cursor_info
)) {
860 qemu_log_mask(LOG_GUEST_ERROR
,
861 "%s: cursor size incorrect %zu vs %zu\n",
862 __func__
, s
, sizeof(cursor_info
));
864 update_cursor(g
, &cursor_info
);
866 virtqueue_push(vq
, elem
, 0);
867 virtio_notify(vdev
, vq
);
872 static void virtio_gpu_cursor_bh(void *opaque
)
874 VirtIOGPU
*g
= opaque
;
875 virtio_gpu_handle_cursor(&g
->parent_obj
, g
->cursor_vq
);
878 static void virtio_gpu_invalidate_display(void *opaque
)
882 static void virtio_gpu_update_display(void *opaque
)
886 static void virtio_gpu_text_update(void *opaque
, console_ch_t
*chardata
)
890 static int virtio_gpu_ui_info(void *opaque
, uint32_t idx
, QemuUIInfo
*info
)
892 VirtIOGPU
*g
= opaque
;
894 if (idx
>= g
->conf
.max_outputs
) {
898 g
->req_state
[idx
].x
= info
->xoff
;
899 g
->req_state
[idx
].y
= info
->yoff
;
900 g
->req_state
[idx
].width
= info
->width
;
901 g
->req_state
[idx
].height
= info
->height
;
903 if (info
->width
&& info
->height
) {
904 g
->enabled_output_bitmask
|= (1 << idx
);
906 g
->enabled_output_bitmask
&= ~(1 << idx
);
909 /* send event to guest */
910 virtio_gpu_notify_event(g
, VIRTIO_GPU_EVENT_DISPLAY
);
914 static void virtio_gpu_gl_block(void *opaque
, bool block
)
916 VirtIOGPU
*g
= opaque
;
918 g
->renderer_blocked
= block
;
920 virtio_gpu_process_cmdq(g
);
924 const GraphicHwOps virtio_gpu_ops
= {
925 .invalidate
= virtio_gpu_invalidate_display
,
926 .gfx_update
= virtio_gpu_update_display
,
927 .text_update
= virtio_gpu_text_update
,
928 .ui_info
= virtio_gpu_ui_info
,
929 .gl_block
= virtio_gpu_gl_block
,
932 static const VMStateDescription vmstate_virtio_gpu_unmigratable
= {
933 .name
= "virtio-gpu",
937 static void virtio_gpu_device_realize(DeviceState
*qdev
, Error
**errp
)
939 VirtIODevice
*vdev
= VIRTIO_DEVICE(qdev
);
940 VirtIOGPU
*g
= VIRTIO_GPU(qdev
);
944 if (g
->conf
.max_outputs
> VIRTIO_GPU_MAX_SCANOUTS
) {
945 error_setg(errp
, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS
);
949 g
->config_size
= sizeof(struct virtio_gpu_config
);
950 g
->virtio_config
.num_scanouts
= g
->conf
.max_outputs
;
951 virtio_init(VIRTIO_DEVICE(g
), "virtio-gpu", VIRTIO_ID_GPU
,
954 g
->req_state
[0].width
= 1024;
955 g
->req_state
[0].height
= 768;
957 g
->use_virgl_renderer
= false;
958 #if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
961 have_virgl
= display_opengl
;
964 g
->conf
.flags
&= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED
);
967 if (virtio_gpu_virgl_enabled(g
->conf
)) {
968 /* use larger control queue in 3d mode */
969 g
->ctrl_vq
= virtio_add_queue(vdev
, 256, virtio_gpu_handle_ctrl_cb
);
970 g
->cursor_vq
= virtio_add_queue(vdev
, 16, virtio_gpu_handle_cursor_cb
);
971 g
->virtio_config
.num_capsets
= 1;
973 g
->ctrl_vq
= virtio_add_queue(vdev
, 64, virtio_gpu_handle_ctrl_cb
);
974 g
->cursor_vq
= virtio_add_queue(vdev
, 16, virtio_gpu_handle_cursor_cb
);
977 g
->ctrl_bh
= qemu_bh_new(virtio_gpu_ctrl_bh
, g
);
978 g
->cursor_bh
= qemu_bh_new(virtio_gpu_cursor_bh
, g
);
979 QTAILQ_INIT(&g
->reslist
);
980 QTAILQ_INIT(&g
->cmdq
);
981 QTAILQ_INIT(&g
->fenceq
);
983 g
->enabled_output_bitmask
= 1;
986 for (i
= 0; i
< g
->conf
.max_outputs
; i
++) {
988 graphic_console_init(DEVICE(g
), i
, &virtio_gpu_ops
, g
);
990 dpy_gfx_replace_surface(g
->scanout
[i
].con
, NULL
);
994 vmstate_register(qdev
, -1, &vmstate_virtio_gpu_unmigratable
, g
);
997 static void virtio_gpu_instance_init(Object
*obj
)
1001 static void virtio_gpu_reset(VirtIODevice
*vdev
)
1003 VirtIOGPU
*g
= VIRTIO_GPU(vdev
);
1004 struct virtio_gpu_simple_resource
*res
, *tmp
;
1009 QTAILQ_FOREACH_SAFE(res
, &g
->reslist
, next
, tmp
) {
1010 virtio_gpu_resource_destroy(g
, res
);
1012 for (i
= 0; i
< g
->conf
.max_outputs
; i
++) {
1014 g
->req_state
[i
].x
= 0;
1015 g
->req_state
[i
].y
= 0;
1017 g
->req_state
[0].width
= 1024;
1018 g
->req_state
[0].height
= 768;
1020 g
->req_state
[i
].width
= 0;
1021 g
->req_state
[i
].height
= 0;
1024 g
->scanout
[i
].resource_id
= 0;
1025 g
->scanout
[i
].width
= 0;
1026 g
->scanout
[i
].height
= 0;
1027 g
->scanout
[i
].x
= 0;
1028 g
->scanout
[i
].y
= 0;
1029 g
->scanout
[i
].ds
= NULL
;
1031 g
->enabled_output_bitmask
= 1;
1034 if (g
->use_virgl_renderer
) {
1035 virtio_gpu_virgl_reset(g
);
1036 g
->use_virgl_renderer
= 0;
1041 static Property virtio_gpu_properties
[] = {
1042 DEFINE_PROP_UINT32("max_outputs", VirtIOGPU
, conf
.max_outputs
, 1),
1044 DEFINE_PROP_BIT("virgl", VirtIOGPU
, conf
.flags
,
1045 VIRTIO_GPU_FLAG_VIRGL_ENABLED
, true),
1046 DEFINE_PROP_BIT("stats", VirtIOGPU
, conf
.flags
,
1047 VIRTIO_GPU_FLAG_STATS_ENABLED
, false),
1049 DEFINE_PROP_END_OF_LIST(),
1052 static void virtio_gpu_class_init(ObjectClass
*klass
, void *data
)
1054 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1055 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_CLASS(klass
);
1057 vdc
->realize
= virtio_gpu_device_realize
;
1058 vdc
->get_config
= virtio_gpu_get_config
;
1059 vdc
->set_config
= virtio_gpu_set_config
;
1060 vdc
->get_features
= virtio_gpu_get_features
;
1061 vdc
->set_features
= virtio_gpu_set_features
;
1063 vdc
->reset
= virtio_gpu_reset
;
1065 dc
->props
= virtio_gpu_properties
;
1068 static const TypeInfo virtio_gpu_info
= {
1069 .name
= TYPE_VIRTIO_GPU
,
1070 .parent
= TYPE_VIRTIO_DEVICE
,
1071 .instance_size
= sizeof(VirtIOGPU
),
1072 .instance_init
= virtio_gpu_instance_init
,
1073 .class_init
= virtio_gpu_class_init
,
1076 static void virtio_register_types(void)
1078 type_register_static(&virtio_gpu_info
);
1081 type_init(virtio_register_types
)
1083 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr
) != 24);
1084 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor
) != 56);
1085 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref
) != 32);
1086 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d
) != 40);
1087 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout
) != 48);
1088 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush
) != 48);
1089 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d
) != 56);
1090 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry
) != 16);
1091 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing
) != 32);
1092 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing
) != 32);
1093 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info
) != 408);
1095 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d
) != 72);
1096 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d
) != 72);
1097 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create
) != 96);
1098 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy
) != 24);
1099 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource
) != 32);
1100 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit
) != 32);
1101 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info
) != 32);
1102 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info
) != 40);
1103 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset
) != 32);
1104 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset
) != 24);