/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "qapi/error.h"
#include "qemu/sockets.h"

#include <glib-unix.h>

#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);
static void vg_cleanup_mapping(VuGpu *g,
                               struct virtio_gpu_simple_resource *res);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

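/*
 * Write a buffer to the vhost-user-gpu socket; when fd is not -1, the file
 * descriptor is attached as SCM_RIGHTS ancillary data so the other side can
 * receive it (used e.g. for dmabuf scanouts).  Interrupted writes are
 * retried on EINTR/EAGAIN.
 */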
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

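/*
 * Send a VhostUserGpuMsg (header plus msg->size payload bytes) to the
 * frontend, optionally passing a file descriptor along with it; the socket
 * is closed on write failure.
 */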
void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

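/*
 * Receive a reply from the frontend: read the request, flags and size
 * fields of the header, check that they match what the caller expects,
 * then read the payload.  Returns false (and closes the socket) on error.
 */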
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

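/*
 * Complete a control command: fill in fence information when the guest
 * requested it, byteswap the response header, copy the response into the
 * command's in-iov and push it onto the control virtqueue.
 */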
static void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }

    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

static void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

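/*
 * GET_DISPLAY_INFO is answered asynchronously: forward the request to the
 * frontend over the gpu socket and mark the command pending; the reply is
 * picked up by get_display_info_cb() above.
 */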
void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

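/*
 * Create a 2D resource: allocate a vugbm buffer of the requested size and
 * wrap it in a pixman image using the pixman format that corresponds to the
 * guest-requested virtio-gpu format.
 */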
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    vg_cleanup_mapping(g, res);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_resource_destroy(g, res);
}

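/*
 * Translate the guest's list of memory entries into host iovecs, mapping
 * each guest physical address through vu_gpa_to_va().  Returns 0 on
 * success, -1 if an entry cannot be mapped or the command is malformed.
 */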
int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* Though currently only free iov, maybe later will do more work. */
void vg_cleanup_mapping_iov(VuGpu *g,
                            struct iovec *iov, uint32_t count)
{
    g_free(iov);
}

static void
vg_cleanup_mapping(VuGpu *g,
                   struct virtio_gpu_simple_resource *res)
{
    vg_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_cleanup_mapping(g, res);
}

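/*
 * Copy data from the guest backing store (res->iov) into the host pixman
 * image: row by row when only a sub-rectangle is transferred, or in one
 * single copy when the whole image is updated.
 */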
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    int bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

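/*
 * Associate a resource with a scanout.  When the backing vugbm buffer can
 * export a dmabuf fd, send VHOST_USER_GPU_DMABUF_SCANOUT with the fd
 * attached; otherwise fall back to the plain VHOST_USER_GPU_SCANOUT message.
 */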
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

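/*
 * Flush a resource: intersect the flushed rectangle with every scanout the
 * resource is bound to, then either send VHOST_USER_GPU_DMABUF_UPDATE (when
 * the buffer is a dmabuf) or copy the damaged pixels inline into a
 * VHOST_USER_GPU_UPDATE message.
 */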
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height
                }
            };
            vg_send_msg(g, &vmsg, -1);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height
            };
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

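/*
 * Control queue handler: pop commands, dispatch them to the virgl or 2D
 * path, and keep unfinished (fenced/pending) commands on fenceq.  While a
 * reply from the frontend is outstanding (wait_in != 0), processing stops
 * and is resumed from the socket callbacks.
 */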
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return G_SOURCE_CONTINUE;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

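/*
 * Handle vhost-user messages not covered by libvhost-user:
 * VHOST_USER_GPU_SET_SOCKET carries the fd of the gpu backchannel socket,
 * which is then used to negotiate the gpu protocol features.
 */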
static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }
}

*dev
)
1084 uint64_t features
= 0;
1087 features
|= 1 << VIRTIO_GPU_F_VIRGL
;
static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    vugbm_device_init(&g.gdev, g.drm_rnode_fd);

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}