2 * Virtio vhost-user GPU Device
4 * Copyright Red Hat, Inc. 2013-2018
7 * Dave Airlie <airlied@redhat.com>
8 * Gerd Hoffmann <kraxel@redhat.com>
9 * Marc-André Lureau <marcandre.lureau@redhat.com>
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include "qemu/sockets.h"
20 #include <glib-unix.h>
23 #include "hw/virtio/virtio-gpu-bswap.h"
24 #include "hw/virtio/virtio-gpu-pixman.h"
28 struct virtio_gpu_simple_resource
{
35 uint32_t scanout_bitmask
;
36 pixman_image_t
*image
;
37 struct vugbm_buffer buffer
;
38 QTAILQ_ENTRY(virtio_gpu_simple_resource
) next
;
/* Command-line option state, filled in by g_option_context_parse()
 * from the entries[] table below. */
static gboolean opt_print_caps;   /* --print-capabilities: dump JSON caps */
static int opt_fdnum = -1;        /* --fd: inherited socket fd, -1 = unset */
static char *opt_socket_path;     /* --socket-path: UNIX socket to accept on */
static char *opt_render_node;     /* --render-node: DRM render node path */
static gboolean opt_virgl;        /* --virgl: enable virgl (3D) rendering */

/* Forward declaration: the ctrl queue handler is re-entered from
 * source_wait_cb() once a pending display update has been acked. */
static void vg_handle_ctrl(VuDev *dev, int qidx);
50 vg_cmd_to_string(int cmd
)
52 #define CMD(cmd) [cmd] = #cmd
53 static const char *vg_cmd_str
[] = {
54 CMD(VIRTIO_GPU_UNDEFINED
),
57 CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO
),
58 CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
),
59 CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF
),
60 CMD(VIRTIO_GPU_CMD_SET_SCANOUT
),
61 CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH
),
62 CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D
),
63 CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
),
64 CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING
),
65 CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO
),
66 CMD(VIRTIO_GPU_CMD_GET_CAPSET
),
69 CMD(VIRTIO_GPU_CMD_CTX_CREATE
),
70 CMD(VIRTIO_GPU_CMD_CTX_DESTROY
),
71 CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE
),
72 CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE
),
73 CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D
),
74 CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D
),
75 CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D
),
76 CMD(VIRTIO_GPU_CMD_SUBMIT_3D
),
79 CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR
),
80 CMD(VIRTIO_GPU_CMD_MOVE_CURSOR
),
84 if (cmd
>= 0 && cmd
< G_N_ELEMENTS(vg_cmd_str
)) {
85 return vg_cmd_str
[cmd
];
/* Blocking read of exactly buflen bytes from the vhost-user-gpu socket,
 * retrying on EINTR/EAGAIN. Returns the read() result; a short read only
 * triggers a warning, the caller checks for < 0. */
static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}
105 vg_sock_fd_close(VuGpu
*g
)
107 if (g
->sock_fd
>= 0) {
114 source_wait_cb(gint fd
, GIOCondition condition
, gpointer user_data
)
116 VuGpu
*g
= user_data
;
118 if (!vg_recv_msg(g
, VHOST_USER_GPU_DMABUF_UPDATE
, 0, NULL
)) {
119 return G_SOURCE_CONTINUE
;
124 vg_handle_ctrl(&g
->dev
.parent
, 0);
126 return G_SOURCE_REMOVE
;
132 assert(g
->wait_ok
== 0);
133 g
->wait_ok
= g_unix_fd_add(g
->sock_fd
, G_IO_IN
| G_IO_HUP
,
138 vg_sock_fd_write(int sock
, const void *buf
, ssize_t buflen
, int fd
)
144 struct cmsghdr cmsghdr
;
145 char control
[CMSG_SPACE(sizeof(int))];
147 struct cmsghdr
*cmsg
;
149 iov
.iov_base
= (void *)buf
;
150 iov
.iov_len
= buflen
;
158 msg
.msg_control
= cmsgu
.control
;
159 msg
.msg_controllen
= sizeof(cmsgu
.control
);
161 cmsg
= CMSG_FIRSTHDR(&msg
);
162 cmsg
->cmsg_len
= CMSG_LEN(sizeof(int));
163 cmsg
->cmsg_level
= SOL_SOCKET
;
164 cmsg
->cmsg_type
= SCM_RIGHTS
;
166 *((int *)CMSG_DATA(cmsg
)) = fd
;
168 msg
.msg_control
= NULL
;
169 msg
.msg_controllen
= 0;
173 ret
= sendmsg(sock
, &msg
, 0);
174 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
176 g_warn_if_fail(ret
== buflen
);
181 vg_send_msg(VuGpu
*vg
, const VhostUserGpuMsg
*msg
, int fd
)
183 if (vg_sock_fd_write(vg
->sock_fd
, msg
,
184 VHOST_USER_GPU_HDR_SIZE
+ msg
->size
, fd
) < 0) {
185 vg_sock_fd_close(vg
);
190 vg_recv_msg(VuGpu
*g
, uint32_t expect_req
, uint32_t expect_size
,
193 uint32_t req
, flags
, size
;
195 if (vg_sock_fd_read(g
->sock_fd
, &req
, sizeof(req
)) < 0 ||
196 vg_sock_fd_read(g
->sock_fd
, &flags
, sizeof(flags
)) < 0 ||
197 vg_sock_fd_read(g
->sock_fd
, &size
, sizeof(size
)) < 0) {
201 g_return_val_if_fail(req
== expect_req
, false);
202 g_return_val_if_fail(flags
& VHOST_USER_GPU_MSG_FLAG_REPLY
, false);
203 g_return_val_if_fail(size
== expect_size
, false);
205 if (size
&& vg_sock_fd_read(g
->sock_fd
, payload
, size
) != size
) {
216 static struct virtio_gpu_simple_resource
*
217 virtio_gpu_find_resource(VuGpu
*g
, uint32_t resource_id
)
219 struct virtio_gpu_simple_resource
*res
;
221 QTAILQ_FOREACH(res
, &g
->reslist
, next
) {
222 if (res
->resource_id
== resource_id
) {
230 vg_ctrl_response(VuGpu
*g
,
231 struct virtio_gpu_ctrl_command
*cmd
,
232 struct virtio_gpu_ctrl_hdr
*resp
,
237 if (cmd
->cmd_hdr
.flags
& VIRTIO_GPU_FLAG_FENCE
) {
238 resp
->flags
|= VIRTIO_GPU_FLAG_FENCE
;
239 resp
->fence_id
= cmd
->cmd_hdr
.fence_id
;
240 resp
->ctx_id
= cmd
->cmd_hdr
.ctx_id
;
242 virtio_gpu_ctrl_hdr_bswap(resp
);
243 s
= iov_from_buf(cmd
->elem
.in_sg
, cmd
->elem
.in_num
, 0, resp
, resp_len
);
245 g_critical("%s: response size incorrect %zu vs %zu",
246 __func__
, s
, resp_len
);
248 vu_queue_push(&g
->dev
.parent
, cmd
->vq
, &cmd
->elem
, s
);
249 vu_queue_notify(&g
->dev
.parent
, cmd
->vq
);
250 cmd
->finished
= true;
254 vg_ctrl_response_nodata(VuGpu
*g
,
255 struct virtio_gpu_ctrl_command
*cmd
,
256 enum virtio_gpu_ctrl_type type
)
258 struct virtio_gpu_ctrl_hdr resp
= {
262 vg_ctrl_response(g
, cmd
, &resp
, sizeof(resp
));
266 vg_get_display_info(VuGpu
*vg
, struct virtio_gpu_ctrl_command
*cmd
)
268 struct virtio_gpu_resp_display_info dpy_info
= { {} };
269 VhostUserGpuMsg msg
= {
270 .request
= VHOST_USER_GPU_GET_DISPLAY_INFO
,
274 assert(vg
->wait_ok
== 0);
276 vg_send_msg(vg
, &msg
, -1);
277 if (!vg_recv_msg(vg
, msg
.request
, sizeof(dpy_info
), &dpy_info
)) {
281 vg_ctrl_response(vg
, cmd
, &dpy_info
.hdr
, sizeof(dpy_info
));
285 vg_resource_create_2d(VuGpu
*g
,
286 struct virtio_gpu_ctrl_command
*cmd
)
288 pixman_format_code_t pformat
;
289 struct virtio_gpu_simple_resource
*res
;
290 struct virtio_gpu_resource_create_2d c2d
;
293 virtio_gpu_bswap_32(&c2d
, sizeof(c2d
));
295 if (c2d
.resource_id
== 0) {
296 g_critical("%s: resource id 0 is not allowed", __func__
);
297 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
301 res
= virtio_gpu_find_resource(g
, c2d
.resource_id
);
303 g_critical("%s: resource already exists %d", __func__
, c2d
.resource_id
);
304 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
308 res
= g_new0(struct virtio_gpu_simple_resource
, 1);
309 res
->width
= c2d
.width
;
310 res
->height
= c2d
.height
;
311 res
->format
= c2d
.format
;
312 res
->resource_id
= c2d
.resource_id
;
314 pformat
= virtio_gpu_get_pixman_format(c2d
.format
);
316 g_critical("%s: host couldn't handle guest format %d",
317 __func__
, c2d
.format
);
319 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
322 vugbm_buffer_create(&res
->buffer
, &g
->gdev
, c2d
.width
, c2d
.height
);
323 res
->image
= pixman_image_create_bits(pformat
,
326 (uint32_t *)res
->buffer
.mmap
,
329 g_critical("%s: resource creation failed %d %d %d",
330 __func__
, c2d
.resource_id
, c2d
.width
, c2d
.height
);
332 cmd
->error
= VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY
;
336 QTAILQ_INSERT_HEAD(&g
->reslist
, res
, next
);
340 vg_disable_scanout(VuGpu
*g
, int scanout_id
)
342 struct virtio_gpu_scanout
*scanout
= &g
->scanout
[scanout_id
];
343 struct virtio_gpu_simple_resource
*res
;
345 if (scanout
->resource_id
== 0) {
349 res
= virtio_gpu_find_resource(g
, scanout
->resource_id
);
351 res
->scanout_bitmask
&= ~(1 << scanout_id
);
358 VhostUserGpuMsg msg
= {
359 .request
= VHOST_USER_GPU_SCANOUT
,
360 .size
= sizeof(VhostUserGpuScanout
),
361 .payload
.scanout
.scanout_id
= scanout_id
,
363 vg_send_msg(g
, &msg
, -1);
368 vg_resource_destroy(VuGpu
*g
,
369 struct virtio_gpu_simple_resource
*res
)
373 if (res
->scanout_bitmask
) {
374 for (i
= 0; i
< VIRTIO_GPU_MAX_SCANOUTS
; i
++) {
375 if (res
->scanout_bitmask
& (1 << i
)) {
376 vg_disable_scanout(g
, i
);
381 vugbm_buffer_destroy(&res
->buffer
);
382 pixman_image_unref(res
->image
);
383 QTAILQ_REMOVE(&g
->reslist
, res
, next
);
388 vg_resource_unref(VuGpu
*g
,
389 struct virtio_gpu_ctrl_command
*cmd
)
391 struct virtio_gpu_simple_resource
*res
;
392 struct virtio_gpu_resource_unref unref
;
394 VUGPU_FILL_CMD(unref
);
395 virtio_gpu_bswap_32(&unref
, sizeof(unref
));
397 res
= virtio_gpu_find_resource(g
, unref
.resource_id
);
399 g_critical("%s: illegal resource specified %d",
400 __func__
, unref
.resource_id
);
401 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
404 vg_resource_destroy(g
, res
);
408 vg_create_mapping_iov(VuGpu
*g
,
409 struct virtio_gpu_resource_attach_backing
*ab
,
410 struct virtio_gpu_ctrl_command
*cmd
,
413 struct virtio_gpu_mem_entry
*ents
;
417 if (ab
->nr_entries
> 16384) {
418 g_critical("%s: nr_entries is too big (%d > 16384)",
419 __func__
, ab
->nr_entries
);
423 esize
= sizeof(*ents
) * ab
->nr_entries
;
424 ents
= g_malloc(esize
);
425 s
= iov_to_buf(cmd
->elem
.out_sg
, cmd
->elem
.out_num
,
426 sizeof(*ab
), ents
, esize
);
428 g_critical("%s: command data size incorrect %zu vs %zu",
434 *iov
= g_malloc0(sizeof(struct iovec
) * ab
->nr_entries
);
435 for (i
= 0; i
< ab
->nr_entries
; i
++) {
436 uint64_t len
= ents
[i
].length
;
437 (*iov
)[i
].iov_len
= ents
[i
].length
;
438 (*iov
)[i
].iov_base
= vu_gpa_to_va(&g
->dev
.parent
, &len
, ents
[i
].addr
);
439 if (!(*iov
)[i
].iov_base
|| len
!= ents
[i
].length
) {
440 g_critical("%s: resource %d element %d",
441 __func__
, ab
->resource_id
, i
);
453 vg_resource_attach_backing(VuGpu
*g
,
454 struct virtio_gpu_ctrl_command
*cmd
)
456 struct virtio_gpu_simple_resource
*res
;
457 struct virtio_gpu_resource_attach_backing ab
;
461 virtio_gpu_bswap_32(&ab
, sizeof(ab
));
463 res
= virtio_gpu_find_resource(g
, ab
.resource_id
);
465 g_critical("%s: illegal resource specified %d",
466 __func__
, ab
.resource_id
);
467 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
471 ret
= vg_create_mapping_iov(g
, &ab
, cmd
, &res
->iov
);
473 cmd
->error
= VIRTIO_GPU_RESP_ERR_UNSPEC
;
477 res
->iov_cnt
= ab
.nr_entries
;
481 vg_resource_detach_backing(VuGpu
*g
,
482 struct virtio_gpu_ctrl_command
*cmd
)
484 struct virtio_gpu_simple_resource
*res
;
485 struct virtio_gpu_resource_detach_backing detach
;
487 VUGPU_FILL_CMD(detach
);
488 virtio_gpu_bswap_32(&detach
, sizeof(detach
));
490 res
= virtio_gpu_find_resource(g
, detach
.resource_id
);
491 if (!res
|| !res
->iov
) {
492 g_critical("%s: illegal resource specified %d",
493 __func__
, detach
.resource_id
);
494 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
504 vg_transfer_to_host_2d(VuGpu
*g
,
505 struct virtio_gpu_ctrl_command
*cmd
)
507 struct virtio_gpu_simple_resource
*res
;
509 uint32_t src_offset
, dst_offset
, stride
;
511 pixman_format_code_t format
;
512 struct virtio_gpu_transfer_to_host_2d t2d
;
515 virtio_gpu_t2d_bswap(&t2d
);
517 res
= virtio_gpu_find_resource(g
, t2d
.resource_id
);
518 if (!res
|| !res
->iov
) {
519 g_critical("%s: illegal resource specified %d",
520 __func__
, t2d
.resource_id
);
521 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
525 if (t2d
.r
.x
> res
->width
||
526 t2d
.r
.y
> res
->height
||
527 t2d
.r
.width
> res
->width
||
528 t2d
.r
.height
> res
->height
||
529 t2d
.r
.x
+ t2d
.r
.width
> res
->width
||
530 t2d
.r
.y
+ t2d
.r
.height
> res
->height
) {
531 g_critical("%s: transfer bounds outside resource"
532 " bounds for resource %d: %d %d %d %d vs %d %d",
533 __func__
, t2d
.resource_id
, t2d
.r
.x
, t2d
.r
.y
,
534 t2d
.r
.width
, t2d
.r
.height
, res
->width
, res
->height
);
535 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
539 format
= pixman_image_get_format(res
->image
);
540 bpp
= (PIXMAN_FORMAT_BPP(format
) + 7) / 8;
541 stride
= pixman_image_get_stride(res
->image
);
543 if (t2d
.offset
|| t2d
.r
.x
|| t2d
.r
.y
||
544 t2d
.r
.width
!= pixman_image_get_width(res
->image
)) {
545 void *img_data
= pixman_image_get_data(res
->image
);
546 for (h
= 0; h
< t2d
.r
.height
; h
++) {
547 src_offset
= t2d
.offset
+ stride
* h
;
548 dst_offset
= (t2d
.r
.y
+ h
) * stride
+ (t2d
.r
.x
* bpp
);
550 iov_to_buf(res
->iov
, res
->iov_cnt
, src_offset
,
552 + dst_offset
, t2d
.r
.width
* bpp
);
555 iov_to_buf(res
->iov
, res
->iov_cnt
, 0,
556 pixman_image_get_data(res
->image
),
557 pixman_image_get_stride(res
->image
)
558 * pixman_image_get_height(res
->image
));
563 vg_set_scanout(VuGpu
*g
,
564 struct virtio_gpu_ctrl_command
*cmd
)
566 struct virtio_gpu_simple_resource
*res
, *ores
;
567 struct virtio_gpu_scanout
*scanout
;
568 struct virtio_gpu_set_scanout ss
;
572 virtio_gpu_bswap_32(&ss
, sizeof(ss
));
574 if (ss
.scanout_id
>= VIRTIO_GPU_MAX_SCANOUTS
) {
575 g_critical("%s: illegal scanout id specified %d",
576 __func__
, ss
.scanout_id
);
577 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID
;
581 if (ss
.resource_id
== 0) {
582 vg_disable_scanout(g
, ss
.scanout_id
);
586 /* create a surface for this scanout */
587 res
= virtio_gpu_find_resource(g
, ss
.resource_id
);
589 g_critical("%s: illegal resource specified %d",
590 __func__
, ss
.resource_id
);
591 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
595 if (ss
.r
.x
> res
->width
||
596 ss
.r
.y
> res
->height
||
597 ss
.r
.width
> res
->width
||
598 ss
.r
.height
> res
->height
||
599 ss
.r
.x
+ ss
.r
.width
> res
->width
||
600 ss
.r
.y
+ ss
.r
.height
> res
->height
) {
601 g_critical("%s: illegal scanout %d bounds for"
602 " resource %d, (%d,%d)+%d,%d vs %d %d",
603 __func__
, ss
.scanout_id
, ss
.resource_id
, ss
.r
.x
, ss
.r
.y
,
604 ss
.r
.width
, ss
.r
.height
, res
->width
, res
->height
);
605 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
609 scanout
= &g
->scanout
[ss
.scanout_id
];
611 ores
= virtio_gpu_find_resource(g
, scanout
->resource_id
);
613 ores
->scanout_bitmask
&= ~(1 << ss
.scanout_id
);
616 res
->scanout_bitmask
|= (1 << ss
.scanout_id
);
617 scanout
->resource_id
= ss
.resource_id
;
620 scanout
->width
= ss
.r
.width
;
621 scanout
->height
= ss
.r
.height
;
623 struct vugbm_buffer
*buffer
= &res
->buffer
;
625 if (vugbm_buffer_can_get_dmabuf_fd(buffer
)) {
626 VhostUserGpuMsg msg
= {
627 .request
= VHOST_USER_GPU_DMABUF_SCANOUT
,
628 .size
= sizeof(VhostUserGpuDMABUFScanout
),
629 .payload
.dmabuf_scanout
= (VhostUserGpuDMABUFScanout
) {
630 .scanout_id
= ss
.scanout_id
,
634 .height
= ss
.r
.height
,
635 .fd_width
= buffer
->width
,
636 .fd_height
= buffer
->height
,
637 .fd_stride
= buffer
->stride
,
638 .fd_drm_fourcc
= buffer
->format
642 if (vugbm_buffer_get_dmabuf_fd(buffer
, &fd
)) {
643 vg_send_msg(g
, &msg
, fd
);
647 VhostUserGpuMsg msg
= {
648 .request
= VHOST_USER_GPU_SCANOUT
,
649 .size
= sizeof(VhostUserGpuScanout
),
650 .payload
.scanout
= (VhostUserGpuScanout
) {
651 .scanout_id
= ss
.scanout_id
,
652 .width
= scanout
->width
,
653 .height
= scanout
->height
656 vg_send_msg(g
, &msg
, -1);
661 vg_resource_flush(VuGpu
*g
,
662 struct virtio_gpu_ctrl_command
*cmd
)
664 struct virtio_gpu_simple_resource
*res
;
665 struct virtio_gpu_resource_flush rf
;
666 pixman_region16_t flush_region
;
670 virtio_gpu_bswap_32(&rf
, sizeof(rf
));
672 res
= virtio_gpu_find_resource(g
, rf
.resource_id
);
674 g_critical("%s: illegal resource specified %d\n",
675 __func__
, rf
.resource_id
);
676 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID
;
680 if (rf
.r
.x
> res
->width
||
681 rf
.r
.y
> res
->height
||
682 rf
.r
.width
> res
->width
||
683 rf
.r
.height
> res
->height
||
684 rf
.r
.x
+ rf
.r
.width
> res
->width
||
685 rf
.r
.y
+ rf
.r
.height
> res
->height
) {
686 g_critical("%s: flush bounds outside resource"
687 " bounds for resource %d: %d %d %d %d vs %d %d\n",
688 __func__
, rf
.resource_id
, rf
.r
.x
, rf
.r
.y
,
689 rf
.r
.width
, rf
.r
.height
, res
->width
, res
->height
);
690 cmd
->error
= VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER
;
694 pixman_region_init_rect(&flush_region
,
695 rf
.r
.x
, rf
.r
.y
, rf
.r
.width
, rf
.r
.height
);
696 for (i
= 0; i
< VIRTIO_GPU_MAX_SCANOUTS
; i
++) {
697 struct virtio_gpu_scanout
*scanout
;
698 pixman_region16_t region
, finalregion
;
699 pixman_box16_t
*extents
;
701 if (!(res
->scanout_bitmask
& (1 << i
))) {
704 scanout
= &g
->scanout
[i
];
706 pixman_region_init(&finalregion
);
707 pixman_region_init_rect(®ion
, scanout
->x
, scanout
->y
,
708 scanout
->width
, scanout
->height
);
710 pixman_region_intersect(&finalregion
, &flush_region
, ®ion
);
712 extents
= pixman_region_extents(&finalregion
);
713 size_t width
= extents
->x2
- extents
->x1
;
714 size_t height
= extents
->y2
- extents
->y1
;
716 if (vugbm_buffer_can_get_dmabuf_fd(&res
->buffer
)) {
717 VhostUserGpuMsg vmsg
= {
718 .request
= VHOST_USER_GPU_DMABUF_UPDATE
,
719 .size
= sizeof(VhostUserGpuUpdate
),
720 .payload
.update
= (VhostUserGpuUpdate
) {
728 vg_send_msg(g
, &vmsg
, -1);
732 PIXMAN_FORMAT_BPP(pixman_image_get_format(res
->image
)) / 8;
733 size_t size
= width
* height
* bpp
;
735 void *p
= g_malloc(VHOST_USER_GPU_HDR_SIZE
+
736 sizeof(VhostUserGpuUpdate
) + size
);
737 VhostUserGpuMsg
*msg
= p
;
738 msg
->request
= VHOST_USER_GPU_UPDATE
;
739 msg
->size
= sizeof(VhostUserGpuUpdate
) + size
;
740 msg
->payload
.update
= (VhostUserGpuUpdate
) {
748 pixman_image_create_bits(pixman_image_get_format(res
->image
),
749 msg
->payload
.update
.width
,
750 msg
->payload
.update
.height
,
751 p
+ offsetof(VhostUserGpuMsg
,
752 payload
.update
.data
),
754 pixman_image_composite(PIXMAN_OP_SRC
,
756 extents
->x1
, extents
->y1
,
759 pixman_image_unref(i
);
760 vg_send_msg(g
, msg
, -1);
763 pixman_region_fini(®ion
);
764 pixman_region_fini(&finalregion
);
766 pixman_region_fini(&flush_region
);
770 vg_process_cmd(VuGpu
*vg
, struct virtio_gpu_ctrl_command
*cmd
)
772 switch (cmd
->cmd_hdr
.type
) {
773 case VIRTIO_GPU_CMD_GET_DISPLAY_INFO
:
774 vg_get_display_info(vg
, cmd
);
776 case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
:
777 vg_resource_create_2d(vg
, cmd
);
779 case VIRTIO_GPU_CMD_RESOURCE_UNREF
:
780 vg_resource_unref(vg
, cmd
);
782 case VIRTIO_GPU_CMD_RESOURCE_FLUSH
:
783 vg_resource_flush(vg
, cmd
);
785 case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D
:
786 vg_transfer_to_host_2d(vg
, cmd
);
788 case VIRTIO_GPU_CMD_SET_SCANOUT
:
789 vg_set_scanout(vg
, cmd
);
791 case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
:
792 vg_resource_attach_backing(vg
, cmd
);
794 case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING
:
795 vg_resource_detach_backing(vg
, cmd
);
797 /* case VIRTIO_GPU_CMD_GET_EDID: */
800 g_warning("TODO handle ctrl %x\n", cmd
->cmd_hdr
.type
);
801 cmd
->error
= VIRTIO_GPU_RESP_ERR_UNSPEC
;
804 if (!cmd
->finished
) {
805 vg_ctrl_response_nodata(vg
, cmd
, cmd
->error
? cmd
->error
:
806 VIRTIO_GPU_RESP_OK_NODATA
);
811 vg_handle_ctrl(VuDev
*dev
, int qidx
)
813 VuGpu
*vg
= container_of(dev
, VuGpu
, dev
.parent
);
814 VuVirtq
*vq
= vu_get_queue(dev
, qidx
);
815 struct virtio_gpu_ctrl_command
*cmd
= NULL
;
819 if (vg
->wait_ok
!= 0) {
823 cmd
= vu_queue_pop(dev
, vq
, sizeof(struct virtio_gpu_ctrl_command
));
829 cmd
->finished
= false;
831 len
= iov_to_buf(cmd
->elem
.out_sg
, cmd
->elem
.out_num
,
832 0, &cmd
->cmd_hdr
, sizeof(cmd
->cmd_hdr
));
833 if (len
!= sizeof(cmd
->cmd_hdr
)) {
834 g_warning("%s: command size incorrect %zu vs %zu\n",
835 __func__
, len
, sizeof(cmd
->cmd_hdr
));
838 virtio_gpu_ctrl_hdr_bswap(&cmd
->cmd_hdr
);
839 g_debug("%d %s\n", cmd
->cmd_hdr
.type
,
840 vg_cmd_to_string(cmd
->cmd_hdr
.type
));
843 vg_virgl_process_cmd(vg
, cmd
);
845 vg_process_cmd(vg
, cmd
);
848 if (!cmd
->finished
) {
849 QTAILQ_INSERT_TAIL(&vg
->fenceq
, cmd
, next
);
858 update_cursor_data_simple(VuGpu
*g
, uint32_t resource_id
, gpointer data
)
860 struct virtio_gpu_simple_resource
*res
;
862 res
= virtio_gpu_find_resource(g
, resource_id
);
863 g_return_if_fail(res
!= NULL
);
864 g_return_if_fail(pixman_image_get_width(res
->image
) == 64);
865 g_return_if_fail(pixman_image_get_height(res
->image
) == 64);
867 PIXMAN_FORMAT_BPP(pixman_image_get_format(res
->image
)) == 32);
869 memcpy(data
, pixman_image_get_data(res
->image
), 64 * 64 * sizeof(uint32_t));
873 vg_process_cursor_cmd(VuGpu
*g
, struct virtio_gpu_update_cursor
*cursor
)
875 bool move
= cursor
->hdr
.type
!= VIRTIO_GPU_CMD_MOVE_CURSOR
;
877 g_debug("%s move:%d\n", G_STRFUNC
, move
);
880 VhostUserGpuMsg msg
= {
881 .request
= cursor
->resource_id
?
882 VHOST_USER_GPU_CURSOR_POS
: VHOST_USER_GPU_CURSOR_POS_HIDE
,
883 .size
= sizeof(VhostUserGpuCursorPos
),
884 .payload
.cursor_pos
= {
885 .scanout_id
= cursor
->pos
.scanout_id
,
890 vg_send_msg(g
, &msg
, -1);
892 VhostUserGpuMsg msg
= {
893 .request
= VHOST_USER_GPU_CURSOR_UPDATE
,
894 .size
= sizeof(VhostUserGpuCursorUpdate
),
895 .payload
.cursor_update
= {
897 .scanout_id
= cursor
->pos
.scanout_id
,
901 .hot_x
= cursor
->hot_x
,
902 .hot_y
= cursor
->hot_y
,
906 vg_virgl_update_cursor_data(g
, cursor
->resource_id
,
907 msg
.payload
.cursor_update
.data
);
909 update_cursor_data_simple(g
, cursor
->resource_id
,
910 msg
.payload
.cursor_update
.data
);
912 vg_send_msg(g
, &msg
, -1);
917 vg_handle_cursor(VuDev
*dev
, int qidx
)
919 VuGpu
*g
= container_of(dev
, VuGpu
, dev
.parent
);
920 VuVirtq
*vq
= vu_get_queue(dev
, qidx
);
921 VuVirtqElement
*elem
;
923 struct virtio_gpu_update_cursor cursor
;
926 elem
= vu_queue_pop(dev
, vq
, sizeof(VuVirtqElement
));
930 g_debug("cursor out:%d in:%d\n", elem
->out_num
, elem
->in_num
);
932 len
= iov_to_buf(elem
->out_sg
, elem
->out_num
,
933 0, &cursor
, sizeof(cursor
));
934 if (len
!= sizeof(cursor
)) {
935 g_warning("%s: cursor size incorrect %zu vs %zu\n",
936 __func__
, len
, sizeof(cursor
));
938 virtio_gpu_bswap_32(&cursor
, sizeof(cursor
));
939 vg_process_cursor_cmd(g
, &cursor
);
941 vu_queue_push(dev
, vq
, elem
, 0);
942 vu_queue_notify(dev
, vq
);
948 vg_panic(VuDev
*dev
, const char *msg
)
950 g_critical("%s\n", msg
);
955 vg_queue_set_started(VuDev
*dev
, int qidx
, bool started
)
957 VuVirtq
*vq
= vu_get_queue(dev
, qidx
);
959 g_debug("queue started %d:%d\n", qidx
, started
);
963 vu_set_queue_handler(dev
, vq
, started
? vg_handle_ctrl
: NULL
);
966 vu_set_queue_handler(dev
, vq
, started
? vg_handle_cursor
: NULL
);
974 set_gpu_protocol_features(VuGpu
*g
)
977 VhostUserGpuMsg msg
= {
978 .request
= VHOST_USER_GPU_GET_PROTOCOL_FEATURES
981 assert(g
->wait_ok
== 0);
982 vg_send_msg(g
, &msg
, -1);
983 if (!vg_recv_msg(g
, msg
.request
, sizeof(u64
), &u64
)) {
987 msg
= (VhostUserGpuMsg
) {
988 .request
= VHOST_USER_GPU_SET_PROTOCOL_FEATURES
,
989 .size
= sizeof(uint64_t),
992 vg_send_msg(g
, &msg
, -1);
996 vg_process_msg(VuDev
*dev
, VhostUserMsg
*msg
, int *do_reply
)
998 VuGpu
*g
= container_of(dev
, VuGpu
, dev
.parent
);
1000 switch (msg
->request
) {
1001 case VHOST_USER_GPU_SET_SOCKET
: {
1002 g_return_val_if_fail(msg
->fd_num
== 1, 1);
1003 g_return_val_if_fail(g
->sock_fd
== -1, 1);
1004 g
->sock_fd
= msg
->fds
[0];
1005 set_gpu_protocol_features(g
);
1016 vg_get_features(VuDev
*dev
)
1018 uint64_t features
= 0;
1021 features
|= 1 << VIRTIO_GPU_F_VIRGL
;
1028 vg_set_features(VuDev
*dev
, uint64_t features
)
1030 VuGpu
*g
= container_of(dev
, VuGpu
, dev
.parent
);
1031 bool virgl
= features
& (1 << VIRTIO_GPU_F_VIRGL
);
1033 if (virgl
&& !g
->virgl_inited
) {
1034 if (!vg_virgl_init(g
)) {
1035 vg_panic(dev
, "Failed to initialize virgl");
1037 g
->virgl_inited
= true;
1044 vg_get_config(VuDev
*dev
, uint8_t *config
, uint32_t len
)
1046 VuGpu
*g
= container_of(dev
, VuGpu
, dev
.parent
);
1048 g_return_val_if_fail(len
<= sizeof(struct virtio_gpu_config
), -1);
1051 g
->virtio_config
.num_capsets
= vg_virgl_get_num_capsets();
1054 memcpy(config
, &g
->virtio_config
, len
);
1060 vg_set_config(VuDev
*dev
, const uint8_t *data
,
1061 uint32_t offset
, uint32_t size
,
1064 VuGpu
*g
= container_of(dev
, VuGpu
, dev
.parent
);
1065 struct virtio_gpu_config
*config
= (struct virtio_gpu_config
*)data
;
1067 if (config
->events_clear
) {
1068 g
->virtio_config
.events_read
&= ~config
->events_clear
;
1074 static const VuDevIface vuiface
= {
1075 .set_features
= vg_set_features
,
1076 .get_features
= vg_get_features
,
1077 .queue_set_started
= vg_queue_set_started
,
1078 .process_msg
= vg_process_msg
,
1079 .get_config
= vg_get_config
,
1080 .set_config
= vg_set_config
,
1084 vg_destroy(VuGpu
*g
)
1086 struct virtio_gpu_simple_resource
*res
, *tmp
;
1088 vug_deinit(&g
->dev
);
1090 vg_sock_fd_close(g
);
1092 QTAILQ_FOREACH_SAFE(res
, &g
->reslist
, next
, tmp
) {
1093 vg_resource_destroy(g
, res
);
1096 vugbm_device_destroy(&g
->gdev
);
1099 static GOptionEntry entries
[] = {
1100 { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE
, &opt_print_caps
,
1101 "Print capabilities", NULL
},
1102 { "fd", 'f', 0, G_OPTION_ARG_INT
, &opt_fdnum
,
1103 "Use inherited fd socket", "FDNUM" },
1104 { "socket-path", 's', 0, G_OPTION_ARG_FILENAME
, &opt_socket_path
,
1105 "Use UNIX socket path", "PATH" },
1106 { "render-node", 'r', 0, G_OPTION_ARG_FILENAME
, &opt_render_node
,
1107 "Specify DRM render node", "PATH" },
1108 { "virgl", 'v', 0, G_OPTION_ARG_NONE
, &opt_virgl
,
1109 "Turn virgl rendering on", NULL
},
1114 main(int argc
, char *argv
[])
1116 GOptionContext
*context
;
1117 GError
*error
= NULL
;
1118 GMainLoop
*loop
= NULL
;
1120 VuGpu g
= { .sock_fd
= -1, .drm_rnode_fd
= -1 };
1122 QTAILQ_INIT(&g
.reslist
);
1123 QTAILQ_INIT(&g
.fenceq
);
1125 context
= g_option_context_new("QEMU vhost-user-gpu");
1126 g_option_context_add_main_entries(context
, entries
, NULL
);
1127 if (!g_option_context_parse(context
, &argc
, &argv
, &error
)) {
1128 g_printerr("Option parsing failed: %s\n", error
->message
);
1131 g_option_context_free(context
);
1133 if (opt_print_caps
) {
1135 g_print(" \"type\": \"gpu\",\n");
1136 g_print(" \"features\": [\n");
1137 g_print(" \"render-node\",\n");
1138 g_print(" \"virgl\"\n");
1144 g
.drm_rnode_fd
= qemu_drm_rendernode_open(opt_render_node
);
1145 if (opt_render_node
&& g
.drm_rnode_fd
== -1) {
1146 g_printerr("Failed to open DRM rendernode.\n");
1150 if (g
.drm_rnode_fd
>= 0) {
1151 if (!vugbm_device_init(&g
.gdev
, g
.drm_rnode_fd
)) {
1152 g_warning("Failed to init DRM device, using fallback path");
1156 if ((!!opt_socket_path
+ (opt_fdnum
!= -1)) != 1) {
1157 g_printerr("Please specify either --fd or --socket-path\n");
1161 if (opt_socket_path
) {
1162 int lsock
= unix_listen(opt_socket_path
, &error_fatal
);
1163 fd
= accept(lsock
, NULL
, NULL
);
1169 g_printerr("Invalid socket");
1173 vug_init(&g
.dev
, fd
, vg_panic
, &vuiface
);
1175 loop
= g_main_loop_new(NULL
, FALSE
);
1176 g_main_loop_run(loop
);
1177 g_main_loop_unref(loop
);
1180 if (g
.drm_rnode_fd
>= 0) {
1181 close(g
.drm_rnode_fd
);