/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <virglrenderer.h>

#include "virgl.h" /* VuGpu, VUGPU_FILL_CMD and the vg_* helpers */
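/*
 * This file translates virtio-gpu control-queue commands into
 * virglrenderer calls and forwards scanout and flush events to the
 * vhost-user-gpu frontend over the vhost-user-gpu protocol socket.
 */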
void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}
static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}
static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    /* 2D resources: single-level 2D texture bound as a render target */
    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_unref(unref.resource_id);
}
/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif
static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}
static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    /* ctx_id, level, stride and layer_stride are all 0 for 2D transfers */
    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}
static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}
static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
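/*
 * Backing-store management: attach or detach guest memory pages
 * (built into an iovec from the command's mem entries) to a host
 * resource so virglrenderer can transfer data to and from the guest.
 */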
static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
}
static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    g_free(res_iovs);
}
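/*
 * Scanout setup: resolve the resource to a renderer texture, export it
 * as a dma-buf fd and pass the fd to the vhost-user-gpu frontend with a
 * VHOST_USER_GPU_DMABUF_SCANOUT message. A zero resource id (or an
 * empty rect) disables the scanout.
 */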
static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
    }
}
static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}
static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
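/*
 * Main control-queue dispatcher: run the handler matching the command
 * header, then either respond immediately or, for fenced commands,
 * create a renderer fence and respond from virgl_write_fence() once
 * the renderer signals completion.
 */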
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
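/*
 * Fence callback from virglrenderer: complete every queued command
 * whose fence_id is at or below the signalled fence.
 */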
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif
static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version     = 1,
#endif
    .write_fence = virgl_write_fence,
};
static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}
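/*
 * One-time renderer setup: register the callbacks above and, if the
 * renderer exposes a poll fd, watch it from the GLib main loop.
 */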
bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}