contrib/vhost-user-gpu/main.c

/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        /* the designated-initializer array is sparse: guard NULL holes */
        return vg_cmd_str[cmd] ? vg_cmd_str[cmd] : "unknown";
    } else {
        return "unknown";
    }
}

static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

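/*
 * GSource callback armed by vg_wait_ok(): fires when the frontend's
 * DMABUF_UPDATE reply arrives on the GPU channel socket, then resumes
 * control-queue processing.
 */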
static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_ok = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

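/*
 * Pause control-queue processing until the frontend acknowledges the
 * in-flight update; vg_handle_ctrl() returns early while wait_ok != 0.
 */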
void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_ok == 0);
    g->wait_ok = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

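/*
 * Write a message to the GPU channel socket, optionally passing a file
 * descriptor (e.g. a dmabuf fd) as SCM_RIGHTS ancillary data.
 */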
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

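/*
 * Read a reply from the frontend: a header (request, flags, size) followed
 * by an optional payload. The reply must match the expected request type
 * and size, and must carry the REPLY flag; on socket error the channel is
 * closed.
 */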
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }

    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->finished = true;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

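/*
 * GET_DISPLAY_INFO is forwarded synchronously to the vhost-user frontend,
 * which owns the actual displays; its reply is relayed back to the guest.
 */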
void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_ok == 0);

    vg_send_msg(vg, &msg, -1);
    if (!vg_recv_msg(vg, msg.request, sizeof(dpy_info), &dpy_info)) {
        return;
    }

    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));
}

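/*
 * Create a 2D resource: allocate backing storage through vugbm (a dmabuf
 * when the device supports it) and wrap it in a pixman image of the
 * requested format.
 */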
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

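/*
 * Translate the guest's list of physical address ranges into host iovecs
 * via vu_gpa_to_va(), so backing pages can later be copied with
 * iov_to_buf().
 */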
static int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    g_free(res->iov);
    res->iov = NULL;
    res->iov_cnt = 0;
}

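/*
 * Copy guest backing pages into the host pixman image. A full-width,
 * zero-offset transfer is done in a single pass; otherwise the rectangle
 * is copied line by line.
 */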
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

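/*
 * Associate a resource with a scanout. When the backing vugbm buffer can
 * export a dmabuf fd, the fd is passed to the frontend with
 * DMABUF_SCANOUT; otherwise a plain SCANOUT message announces the new
 * size, and pixel data follows later via UPDATE messages.
 */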
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

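/*
 * Flush a resource: for each scanout it is bound to, intersect the flush
 * rectangle with the scanout and send either a DMABUF_UPDATE (then wait
 * for the frontend's ack) or an inline UPDATE message carrying the pixels.
 */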
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    /*     break; */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

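/*
 * Control-queue handler: pop commands and dispatch them. Processing stops
 * while wait_ok is set (an update ack is pending); unfinished (fenced)
 * commands are queued on fenceq until their fence completes.
 */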
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_ok != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            g_free(cmd);
        }
    }
}

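/*
 * Fallback cursor path without virgl: the cursor resource is expected to
 * be a 64x64, 32 bpp pixman image, copied verbatim into the message.
 */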
static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

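/*
 * Per the virtio-gpu spec, MOVE_CURSOR only repositions (or hides) the
 * cursor, while UPDATE_CURSOR also uploads a new cursor image together
 * with the hotspot coordinates.
 */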
static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    g_debug("%s move:%d\n", G_STRFUNC, move);

    if (move) {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        vg_send_msg(g, &msg, -1);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        g_free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

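/*
 * Negotiate vhost-user-gpu protocol features with the frontend. No
 * optional features are used, so an empty feature set is sent back.
 */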
static void
set_gpu_protocol_features(VuGpu *g)
{
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    assert(g->wait_ok == 0);
    vg_send_msg(g, &msg, -1);
    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = 0
    };
    vg_send_msg(g, &msg, -1);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    g_return_val_if_fail(len <= sizeof(struct virtio_gpu_config), -1);

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }
    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    if (g.drm_rnode_fd >= 0) {
        if (!vugbm_device_init(&g.gdev, g.drm_rnode_fd)) {
            g_warning("Failed to init DRM device, using fallback path");
        }
    }

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}