/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

13 #include "qemu/osdep.h"
14 #include "hw/qdev-properties.h"
15 #include "hw/virtio/virtio-gpu.h"
16 #include "chardev/char-fe.h"
17 #include "qapi/error.h"
18 #include "migration/blocker.h"
typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
} VhostUserGpuRequest;

typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;

typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;

static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))

#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
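
/*
 * Each message on the vhost-user-gpu channel starts with a fixed header
 * (request, flags, size) followed by "size" bytes of payload; the header
 * size is derived from the struct above rather than hard-coded.
 */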
static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);

static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;

        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;

        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}

static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}
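
/*
 * The backend waits for a reply to VHOST_USER_GPU_DMABUF_UPDATE before it
 * sends further updates; replying with an empty DMABUF_UPDATE message that
 * has the REPLY flag set lets it resume.
 */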
static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}

static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->width == 0) {
            dpy_gfx_replace_surface(con, NULL);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }
        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = &g->dmabuf[m->scanout_id];
        if (dmabuf->fd >= 0) {
            close(dmabuf->fd);
            dmabuf->fd = -1;
        }
        dpy_gl_release_dmabuf(con, dmabuf);
        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            break;
        }
        *dmabuf = (QemuDmaBuf) {
            .fd = fd,
            .width = m->fd_width,
            .height = m->fd_height,
            .stride = m->fd_stride,
            .fourcc = m->fd_drm_fourcc,
            .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
        };
        dpy_gl_scanout_dmabuf(con, dmabuf);
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        g->backend_blocked = true;
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        break;
    }
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);

        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        }
        dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        break;
    }
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}
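
/*
 * Read one message from the vhost-user-gpu control socket: the three header
 * fields first, then the payload, and dispatch it to the cursor or display
 * handler.
 */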
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}
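
/*
 * While the display is blocked, stop polling the control socket so no
 * further backend messages are processed until the console is flushed.
 */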
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}
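
/*
 * Called by the virtio-gpu base class once the console has flushed the GL
 * update: acknowledge the pending DMABUF_UPDATE and resume reading.
 */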
static void
vhost_user_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(VHOST_USER_GPU(g));
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
}
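
/*
 * Create the private control channel: one end of a socketpair is handed to
 * the vhost-user backend, the other is wrapped in a chardev frontend and
 * polled by vhost_user_gpu_chr_read().
 */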
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}
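
/*
 * The config space is fetched from the vhost-user backend, but the scanout
 * count and event fields are owned by QEMU and overwritten below.
 */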
static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    Error *local_err = NULL;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config),
                               &local_err);
    if (ret) {
        error_report_err(local_err);
        return;
    }

    /* those fields are managed by qemu */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}

static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_MASTER);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}
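
/*
 * DRIVER_OK with a running VM brings up the control socket and starts the
 * vhost-user backend; any other status tears both down again.
 */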
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev");
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};
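
/*
 * Realize mirrors the backend's feature bits (virgl, EDID) into the
 * virtio-gpu config flags and always advertises dmabuf support, since an
 * existing backend may send DMABUF_SCANOUT messages.
 */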
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    /* existing backend may send DMABUF, so let's add that requirement */
    g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED;
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED;
    } else {
        error_report("EDID requested but the backend doesn't support it.");
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    return &g->vhost->dev;
}

static Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_flushed = vhost_user_gpu_gl_flushed;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;
    vdc->get_vhost = vhost_user_gpu_get_vhost;

    device_class_set_props(dc, vhost_user_gpu_properties);
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};
module_obj(TYPE_VHOST_USER_GPU);

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)