/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include <virglrenderer.h>

#include "virgl.h"

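/*
 * Copy the 64x64, 32-bit-per-pixel cursor image for @resource_id out of
 * virglrenderer into the caller-provided buffer.
 */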
void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

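/*
 * Control-queue command handlers.  VUGPU_FILL_CMD() (defined in vugpu.h)
 * copies the fixed-size request from the guest's out-iovecs into the local
 * struct; each handler then validates the fields and forwards the request
 * to virglrenderer.
 */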
static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_unref(unref.resource_id);
}

/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

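/*
 * Capability sets: capset_index 0 maps to VIRTIO_GPU_CAPSET_VIRGL and
 * index 1 to VIRTIO_GPU_CAPSET_VIRGL2; any other index reports an empty
 * capset (version 0, size 0).
 */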
static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

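/*
 * Transfers: the 2D case is expressed as a degenerate 3D box (z = 0,
 * depth = 1) with ctx 0 and zero level/stride, so both paths end up in
 * virgl_renderer_transfer_write_iov().
 */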
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

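/*
 * Backing store: translate the request's guest-physical entries into host
 * iovecs with vg_create_mapping_iov() and attach/detach them as the
 * resource's backing memory in virglrenderer.
 */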
static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    g_free(res_iovs);
}

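/*
 * Scanout setup: export the resource's texture as a dma-buf fd and pass it
 * to the vhost-user front-end with VHOST_USER_GPU_DMABUF_SCANOUT; a zero
 * resource id or an empty rectangle disables the scanout instead.
 */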
static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

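/*
 * Flush: for every scanout currently showing this resource, tell the
 * front-end which rectangle changed via VHOST_USER_GPU_DMABUF_UPDATE.
 */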
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

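/*
 * Main dispatch for the control queue.  After the per-command handler has
 * run, unfinished commands are answered here: errors are reported,
 * unfenced commands get an immediate OK_NODATA response, and fenced
 * commands are completed later from virgl_write_fence().
 */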
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

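/*
 * virglrenderer fence-retire callback: complete every queued command whose
 * fence_id is at or below the retired fence (fences can be emitted out of
 * order by the guest).
 */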
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

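/*
 * With virglrenderer callbacks version >= 2 we can hand virgl the render
 * node fd chosen by the user; with version 1 virgl falls back to its
 * default render node (see the warning in vg_virgl_init()).
 */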
static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

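/*
 * Initialize virglrenderer with EGL and thread-synced fencing, then watch
 * its poll fd (if it provides one) from the GLib main loop via
 * vug_source_new().
 */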
bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}