/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include <virglrenderer.h>

#include "virgl.h"

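/*
 * Copy the cursor image for @resource_id into @data. virglrenderer
 * hands back a freshly allocated 64x64 ARGB snapshot of the cursor
 * resource, which is copied out and released here.
 */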
void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

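/* VIRTIO_GPU_CMD_CTX_CREATE: create a virgl rendering context for the guest. */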
static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

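/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: a 2D resource maps to a virgl
 * texture with fixed depth/array size and a render-target binding.
 */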
static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;                    /* presumably PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = 2;                      /* presumably the render-target bind flag */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

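/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: unlike the 2D case, every
 * creation parameter is supplied by the guest and forwarded as-is.
 */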
static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

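/* VIRTIO_GPU_CMD_RESOURCE_UNREF: drop the renderer's reference to the resource. */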
static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_unref(unref.resource_id);
}

/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

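/*
 * VIRTIO_GPU_CMD_GET_CAPSET_INFO: report which capability sets this
 * device offers; index 0 is VIRGL, index 1 is VIRGL2.
 */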
static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp)); /* don't leak uninitialized stack to the guest */
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

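/* Advertise two capsets when virglrenderer supports VIRGL2, else just one. */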
uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

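/*
 * VIRTIO_GPU_CMD_GET_CAPSET: the response is variable-sized, so it is
 * allocated according to the size reported by virglrenderer.
 */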
static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

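/*
 * The transfer commands copy between the guest backing store and the
 * host resource. The 2D variant always targets context 0, mip level 0.
 */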
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0, /* ctx 0 */
                                      0, /* level */
                                      0, /* stride */
                                      0, /* layer_stride */
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

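/*
 * Backing attach/detach: translate the guest's scatter-gather pages
 * into iovecs and hand them to virglrenderer.
 */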
static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    g_free(res_iovs);
}

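/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: export the scanout resource's texture as
 * a dmabuf fd and pass it to the vhost-user frontend with a
 * DMABUF_SCANOUT message; a zero resource-id disables the scanout.
 */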
static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd); /* the fd has been passed over the socket */
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

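/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: tell the frontend which region of
 * each scanout showing this resource needs to be refreshed.
 */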
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g); /* wait for the frontend to ack the update */
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

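/*
 * Dispatch one control-queue command. Once handled, the response is
 * sent immediately, unless the command carries a fence, in which case
 * the reply is deferred until virgl_write_fence() sees the fence id.
 */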
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

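/*
 * Fence callback, invoked by virglrenderer when GPU work completes:
 * respond to every queued command whose fence id has been reached.
 */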
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd); /* fenced command fully processed; release it */
    }
}

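/*
 * With callbacks version >= 2, virglrenderer can render on the DRM
 * render node the user supplied instead of opening a default one.
 */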
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version     = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

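/*
 * Initialize virglrenderer with EGL and its sync thread enabled, and
 * watch the renderer's poll fd from the GLib main loop so fence
 * completions are picked up by vg_virgl_poll().
 */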
bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}