contrib/vhost-user-gpu/main.c
/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

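/*
 * A guest resource in the non-virgl path: pixel data lives in a pixman
 * image, optionally backed by a vugbm (GBM) buffer so it can be shared
 * with the frontend as a dmabuf, while iov/iov_cnt describe the guest
 * pages attached as backing storage.
 */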
struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

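/*
 * Flow control for dmabuf updates: after a VHOST_USER_GPU_DMABUF_UPDATE
 * is sent, control-queue processing is paused (wait_ok != 0) until the
 * frontend acknowledges the update; source_wait_cb consumes the ack and
 * kicks the control queue again.
 */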
static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_ok = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_ok == 0);
    g->wait_ok = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

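/*
 * Write buflen bytes to the socket; when fd is not -1, the descriptor is
 * attached as SCM_RIGHTS ancillary data so the peer receives its own
 * copy (this is how dmabuf fds reach the frontend).
 */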
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

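/*
 * Messages on the GPU channel start with a fixed header of three u32
 * fields (request, flags, size) followed by size bytes of payload;
 * VHOST_USER_GPU_HDR_SIZE covers those three fields.
 */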
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->finished = true;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_ok == 0);

    vg_send_msg(vg, &msg, -1);
    if (!vg_recv_msg(vg, msg.request, sizeof(dpy_info), &dpy_info)) {
        return;
    }

    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));
}

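/*
 * 2D (non-virgl) resource creation: allocate host storage for the guest
 * resource.  When a render node is available, vugbm backs it with a GBM
 * buffer that can later be exported as a dmabuf; the pixman image wraps
 * the same mapping either way.
 */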
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

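/*
 * Turn the guest-physical entries of an attach-backing command into an
 * iovec of process-local pointers using vu_gpa_to_va().  Any entry that
 * fails to translate, or translates to a shorter mapping than requested,
 * aborts the whole mapping.
 */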
int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    g_free(res->iov);
    res->iov = NULL;
    res->iov_cnt = 0;
}

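/*
 * Copy guest backing pages into the host image.  The general case walks
 * the target rectangle line by line; a transfer of full-width lines
 * starting at offset 0 collapses into a single contiguous copy.
 */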
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

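/*
 * Bind a resource to a scanout.  If the backing buffer can be exported
 * as a dmabuf, the frontend gets the fd and scans out from it directly
 * (VHOST_USER_GPU_DMABUF_SCANOUT); otherwise it is only told the new
 * geometry, and pixel data follows with each flush.
 */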
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

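/*
 * Flush the damaged region of every scanout showing this resource.  On
 * the dmabuf path the frontend already shares the pixels, so a
 * DMABUF_UPDATE notification (plus ack, see vg_wait_ok) is enough; the
 * fallback path copies the damaged rectangle inline into a
 * VHOST_USER_GPU_UPDATE message.
 */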
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            /* copy the damaged rectangle straight into the message payload */
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

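/*
 * Dispatch one control-queue command on the non-virgl (2D) path; any
 * command that did not queue its own response gets a NODATA reply.
 */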
static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    /*     break; */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_ok != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            g_free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

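/*
 * Cursor handling: MOVE_CURSOR becomes a position update (or a hide
 * request when resource_id is 0), while UPDATE_CURSOR additionally
 * carries a 64x64 32-bit cursor image copied out of the resource.
 */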
static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    g_debug("%s move:%d\n", G_STRFUNC, move);

    if (move) {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        vg_send_msg(g, &msg, -1);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        g_free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    assert(g->wait_ok == 0);
    vg_send_msg(g, &msg, -1);
    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = 0
    };
    vg_send_msg(g, &msg, -1);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    g_return_val_if_fail(len <= sizeof(struct virtio_gpu_config), -1);

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    if (g.drm_rnode_fd >= 0) {
        if (!vugbm_device_init(&g.gdev, g.drm_rnode_fd)) {
            g_warning("Failed to init DRM device, using fallback path");
        }
    }

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    vug_init(&g.dev, fd, vg_panic, &vuiface);

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}

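/*
 * Example invocation (socket path and chardev id are illustrative, not
 * part of this file):
 *
 *   ./vhost-user-gpu --socket-path=/tmp/vgpu.sock --virgl
 *
 * with a QEMU frontend along the lines of:
 *
 *   qemu-system-x86_64 ... \
 *       -chardev socket,id=vgpu,path=/tmp/vgpu.sock \
 *       -device vhost-user-gpu-pci,chardev=vgpu
 */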