/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);

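/*
 * Return a human-readable name for a virtio-gpu command for debug
 * logging. The lookup table below is indexed by command value, so
 * out-of-range values must be checked before indexing.
 */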
static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

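/*
 * Blocking helpers for the vhost-user-gpu channel socket: reads retry
 * on EINTR/EAGAIN, and a short read is only reported via
 * g_warn_if_fail() before being returned to the caller.
 */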
static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_ok = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_ok == 0);
    g->wait_ok = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

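/*
 * Write a message to the channel socket, optionally passing a file
 * descriptor (fd != -1) as SCM_RIGHTS ancillary data, e.g. a dmabuf fd
 * accompanying a scanout message.
 */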
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

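/*
 * Receive a reply on the channel socket: the header fields are read
 * one at a time, then the payload. A read error, or a mismatch with
 * the expected request, reply flag or size, closes the socket and
 * returns false.
 */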
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

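/*
 * Complete a control command: propagate fence information from the
 * request into the response, byteswap the header for the guest, copy
 * the response into the element's in-buffers and push it on the
 * control virtqueue.
 */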
void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->finished = true;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_ok == 0);

    vg_send_msg(vg, &msg, -1);
    if (!vg_recv_msg(vg, msg.request, sizeof(dpy_info), &dpy_info)) {
        return;
    }

    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));
}

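/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host resource backed
 * by a vugbm buffer (dmabuf-capable when a DRM render node is
 * available) and wrap it in a pixman image for the 2d software path.
 */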
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

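/*
 * Translate the guest-physical entries of an attach-backing command
 * into an iovec array using the vhost-user memory map. The entry
 * count is capped at 16384 to bound the allocations below.
 */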
int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    g_free(res->iov);
    res->iov = NULL;
    res->iov_cnt = 0;
}

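/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy the guest backing store
 * into the host pixman image. Transfers covering a full-width rect at
 * offset 0 are done with a single copy; anything else is copied line
 * by line.
 */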
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

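/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: associate a resource with a scanout.
 * When the backing buffer can export a dmabuf fd, it is passed to the
 * frontend with VHOST_USER_GPU_DMABUF_SCANOUT so updates can be
 * displayed without copies; otherwise a plain VHOST_USER_GPU_SCANOUT
 * is sent and update pixels will be carried inline.
 */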
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: for every scanout showing the
 * resource, intersect the flush rectangle with the scanout and send
 * the frontend either a DMABUF_UPDATE (followed by a wait for its
 * acknowledgement) or an UPDATE message with the pixels inline.
 */
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *i =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, i,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(i);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

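/*
 * Dispatch one control command to its 2d handler. Commands that are
 * not implemented yet (e.g. GET_EDID) get an UNSPEC error, and any
 * command that did not already send a response is completed with
 * either its error code or OK_NODATA.
 */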
static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

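/*
 * Control queue handler: pop and process commands until the queue is
 * empty. Processing is suspended while wait_ok is non-zero, i.e.
 * while a DMABUF_UPDATE acknowledgement is pending, and resumed from
 * source_wait_cb().
 */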
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_ok != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

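/*
 * Forward a cursor command to the frontend: a move becomes a
 * CURSOR_POS message (or CURSOR_POS_HIDE when resource_id is 0),
 * while an update additionally carries the 64x64 cursor image data.
 */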
static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    g_debug("%s move:%d\n", G_STRFUNC, move);

    if (move) {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        vg_send_msg(g, &msg, -1);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

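/*
 * Negotiate vhost-user-gpu protocol features on the freshly received
 * channel socket. No optional protocol features are used here, so an
 * empty feature mask is sent back.
 */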
static void
set_gpu_protocol_features(VuGpu *g)
{
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    assert(g->wait_ok == 0);
    vg_send_msg(g, &msg, -1);
    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = 0
    };
    vg_send_msg(g, &msg, -1);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    g_return_val_if_fail(len <= sizeof(struct virtio_gpu_config), -1);

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

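/*
 * The backend speaks vhost-user on the socket given by --fd or
 * --socket-path (exactly one of the two); --print-capabilities emits
 * the JSON capability description used for backend discovery.
 */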
int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    if (g.drm_rnode_fd >= 0) {
        if (!vugbm_device_init(&g.gdev, g.drm_rnode_fd)) {
            g_warning("Failed to init DRM device, using fallback path");
        }
    }

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}