/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);
static void vg_cleanup_mapping(VuGpu *g,
                               struct virtio_gpu_simple_resource *res);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

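/*
 * Plain read() helper for the vhost-user-gpu socket: retry on
 * EINTR/EAGAIN and warn on a short read, since every message on this
 * channel is expected to arrive whole.
 */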
static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

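/*
 * Write a buffer to the frontend socket, optionally passing a file
 * descriptor as SCM_RIGHTS ancillary data (fd == -1 sends the payload
 * with no ancillary data).
 */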
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

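/*
 * Replies on this channel are framed as three uint32 fields (request,
 * flags, size) followed by an optional payload; the REPLY flag must be
 * set and request/size must match what the caller expects, otherwise
 * the channel is considered broken and closed.
 */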
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

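/*
 * Complete a control command: if the guest requested a fence, the
 * fence id and context are copied into the response header before the
 * response is pushed into the virtqueue and the guest is notified.
 */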
static void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }

    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

static void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

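/*
 * GET_DISPLAY_INFO is asynchronous: the request is forwarded to the
 * frontend, the command is parked at the tail of the fence queue, and
 * this callback fires once the reply is readable on the socket; queue
 * processing resumes only after wait_in is cleared.
 */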
static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static gboolean
get_edid_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_edid resp_edid;
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("get edid cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_EDID);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_EDID,
                     sizeof(resp_edid), &resp_edid)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &resp_edid.hdr, sizeof(resp_edid));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_get_edid get_edid;

    VUGPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_EDID,
        .size = sizeof(VhostUserGpuEdidRequest),
        .payload.edid_req = {
            .scanout_id = get_edid.scanout,
        },
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_edid_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

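/*
 * 2D resources are backed by a vugbm buffer (a GBM bo when a DRM
 * render node is available, plain memory otherwise) and wrapped in a
 * pixman image so transfers and flushes can operate on host pixels.
 */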
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

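/*
 * Disabling a scanout is signalled to the frontend with a SCANOUT
 * message that only carries the scanout id; width and height stay
 * zero-initialized, which turns the display off.
 */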
static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    vg_cleanup_mapping(g, res);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

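/*
 * Translate the guest-physical entries of an ATTACH_BACKING command
 * into host iovecs with vu_gpa_to_va(); the entry count is capped so
 * a misbehaving guest cannot make the backend allocate unbounded
 * memory.
 */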
static int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_new0(struct iovec, ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* Though currently only free iov, maybe later will do more work. */
void vg_cleanup_mapping_iov(VuGpu *g,
                            struct iovec *iov, uint32_t count)
{
    g_free(iov);
}

static void
vg_cleanup_mapping(VuGpu *g,
                   struct virtio_gpu_simple_resource *res)
{
    vg_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov_cnt = 0;
    res->iov = NULL;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_cleanup_mapping(g, res);
}

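/*
 * Copy guest backing pages into the host pixman image. A transfer
 * covering the full image width at offset 0 is done with a single
 * iov_to_buf(); otherwise the rectangle is copied line by line using
 * the image stride.
 */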
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

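/*
 * Associate a resource with a scanout. When a dmabuf fd can be
 * exported for the backing buffer, a DMABUF_SCANOUT message shares
 * the fd with the frontend and no pixel copies are needed; otherwise
 * a plain SCANOUT is sent and pixels are copied on each flush.
 */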
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

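/*
 * Flush the damaged rectangle to every scanout showing this resource:
 * the flush region is intersected with each scanout, then either a
 * DMABUF_UPDATE tells the frontend to re-read the shared buffer, or
 * the damaged pixels are composited inline into an UPDATE message.
 */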
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                },
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        vg_get_edid(vg, cmd);
        break;
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

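/*
 * Control queue handler: commands are popped and processed one at a
 * time. When a command is waiting on a frontend reply (wait_in set),
 * processing stops and is resumed from the reply callback; unfinished
 * commands are parked on the fence queue until then.
 */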
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

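/*
 * Cursor commands: MOVE_CURSOR becomes a CURSOR_POS (or POS_HIDE)
 * message, while UPDATE_CURSOR embeds the 64x64 ARGB cursor image in
 * a CURSOR_UPDATE message, fetched from virgl or from the plain
 * pixman resource.
 */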
static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            },
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            },
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

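/*
 * Protocol feature negotiation with the frontend: the backend asks
 * for the supported mask, keeps only the bits it understands (EDID
 * and DMABUF2) and sends the result back with SET_PROTOCOL_FEATURES.
 */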
static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    const uint64_t protocol_edid = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID);
    const uint64_t protocol_dmabuf2 = (1 << VHOST_USER_GPU_PROTOCOL_F_DMABUF2);
    VuGpu *g = user_data;
    uint64_t protocol_features;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request,
                     sizeof(protocol_features), &protocol_features)) {
        return G_SOURCE_CONTINUE;
    }

    protocol_features &= (protocol_edid | protocol_dmabuf2);

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = protocol_features,
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    if (g->edid_inited && !(protocol_features & protocol_edid)) {
        g_printerr("EDID feature set by the frontend but it does not support "
                   "the EDID vhost-user-gpu protocol.\n");
        exit(EXIT_FAILURE);
    }

    g->use_modifiers = !!(protocol_features & protocol_dmabuf2);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }
    features |= 1 << VIRTIO_GPU_F_EDID;

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->edid_inited = !!(features & (1 << VIRTIO_GPU_F_EDID));
    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

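/*
 * A typical invocation (assuming QEMU's vhost-user-gpu-pci frontend;
 * adjust paths to your setup):
 *
 *   vhost-user-gpu --render-node /dev/dri/renderD128 -s /tmp/vgpu.sock
 *   qemu-system-x86_64 -chardev socket,id=vgpu,path=/tmp/vgpu.sock \
 *                      -device vhost-user-gpu-pci,chardev=vgpu ...
 */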
int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    vugbm_device_init(&g.gdev, g.drm_rnode_fd);

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}