/* hw/display/vhost-user-gpu.c */

/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/virtio/virtio-gpu.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"

#define VHOST_USER_GPU(obj) \
    OBJECT_CHECK(VhostUserGPU, (obj), TYPE_VHOST_USER_GPU)
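
/*
 * Requests exchanged over the dedicated display channel.  All of them
 * are sent by the vhost-user-gpu backend and handled here; QEMU only
 * writes back replies (GET_PROTOCOL_FEATURES, GET_DISPLAY_INFO, and the
 * DMABUF_UPDATE acknowledgement).
 */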
typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
} VhostUserGpuRequest;

typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;
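
/*
 * Each message is a 12-byte header (request, flags, size) followed by
 * 'size' bytes of request-specific payload; the union covers every
 * payload read or written by this file.
 */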
typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;

static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))
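
/*
 * Set in the 'flags' header field of messages that answer a backend
 * request, e.g. in the GET_PROTOCOL_FEATURES handler below:
 *
 *     VhostUserGpuMsg reply = {
 *         .request = msg->request,
 *         .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
 *         .size = sizeof(uint64_t),
 *     };
 */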
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4

static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
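
/*
 * Apply a backend CURSOR_* message to the QEMU console: (re)define the
 * 64x64 cursor image on CURSOR_UPDATE, then move or hide the pointer.
 */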
static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;

        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}
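
/* Write one complete message, header plus payload, to the display channel. */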
static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}
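
/*
 * A DMABUF_UPDATE is acknowledged with an empty reply once QEMU is done
 * with the buffer, which lets the waiting backend continue rendering.
 */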
static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}
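
/*
 * Dispatch a non-cursor backend message: feature and display-info
 * queries, scanout configuration, dma-buf scanouts, and screen updates.
 * If the console is still busy with GL work afterwards, message
 * processing pauses until vhost_user_gpu_gl_unblock() runs.
 */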
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->scanout_id == 0 && m->width == 0) {
            s->ds = qemu_create_message_surface(640, 480,
                                                "Guest disabled display.");
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }

        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = &g->dmabuf[m->scanout_id];
        if (dmabuf->fd >= 0) {
            close(dmabuf->fd);
            dmabuf->fd = -1;
        }
        if (!console_has_gl_dmabuf(con)) {
            /* it would be nice to report that error earlier */
            error_report("console doesn't support dmabuf!");
            break;
        }
        dpy_gl_release_dmabuf(con, dmabuf);
        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            break;
        }
        *dmabuf = (QemuDmaBuf) {
            .fd = fd,
            .width = m->fd_width,
            .height = m->fd_height,
            .stride = m->fd_stride,
            .fourcc = m->fd_drm_fourcc,
            .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
        };
        dpy_gl_scanout_dmabuf(con, dmabuf);
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        g->backend_blocked = true;
        break;
    }
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);
        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}
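
/*
 * Read one message from the display channel: the three header fields
 * first, then 'size' payload bytes, then dispatch to the cursor or
 * display handler.
 */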
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);
    g_return_if_fail(msg != NULL);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}
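
/* Toggle the read handler on the display channel's file descriptor. */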
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}

static void
vhost_user_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(VHOST_USER_GPU(g));
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
}
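
/*
 * Set up the display channel: a socketpair whose QEMU end is wrapped in
 * a socket chardev and whose other end is handed to the backend via
 * vhost_user_gpu_set_socket().
 */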
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}
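
/*
 * Config space reads are served by the backend, except for the scanout
 * count and event fields, which QEMU owns and overwrites below.
 */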
static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config));
    if (ret) {
        error_report("vhost-user-gpu: get device config space failed");
        return;
    }

    /* those fields are managed by qemu */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}
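
/*
 * Handle an events_clear write locally, then forward the guest's config
 * space write to the backend.
 */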
static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_MASTER);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}
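
/*
 * DRIVER_OK with a running VM brings up the display channel and starts
 * the backend; any other transition tears the channel down and stops it.
 */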
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev", &error_abort);
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};
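
/*
 * Realize: wire up the config-change notifier, initialize the vhost-user
 * backend with the two virtio-gpu queues (control and cursor), inherit
 * VIRGL support from the backend's feature bits, then realize the base
 * device.
 */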
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = vhost_user_gpu_gl_unblock;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;

    dc->props = vhost_user_gpu_properties;
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)