/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#ifdef CONFIG_VIRGL

#include "virglrenderer.h"
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;
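/*
 * Command handlers.  Each handler decodes one guest request with
 * VIRTIO_GPU_FILL_CMD() (which copies the request out of the virtqueue
 * and length-checks it) and forwards it to the matching virglrenderer
 * entry point.
 */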
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;       /* 2D texture (gallium PIPE_TEXTURE_2D) */
    args.format = c2d.format;
    args.bind = (1 << 1);  /* render target (gallium PIPE_BIND_RENDER_TARGET) */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_unref(unref.resource_id);
}
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}
static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
}
static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}
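/*
 * Bind a resource to a scanout (display output): resize the QEMU console
 * to the scanout rectangle and hand the renderer's backing GL texture to
 * the display backend, or turn the scanout off again when the guest sets
 * resource_id / width / height to 0.
 */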
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, info.tex_id,
                       info.flags & 1 /* FIXME: Y_0_TOP */,
                       ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, 0, false,
                       0, 0, 0, 0);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
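/*
 * Execute a guest-supplied 3D command stream: the commands are copied
 * out of the virtqueue into a bounce buffer and handed to the renderer
 * as an array of 32-bit words (hence cs.size / 4).
 */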
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0, /* ctx_id */
                                      0, /* level */
                                      0, /* stride */
                                      0, /* layer_stride */
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}
static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}
static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
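/*
 * Backing-store management: the guest attaches a scatter/gather list of
 * its own pages to a resource; the iovec built from that list is what
 * the transfer commands above read from and write to.
 */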
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
}
static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
}
static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}
static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    resp = g_malloc(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
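/*
 * Main dispatch entry, called once per control-queue command.  Commands
 * without VIRTIO_GPU_FLAG_FENCE are completed immediately; fenced
 * commands are answered later, from virgl_write_fence(), once the
 * renderer has caught up with the corresponding fence id.
 */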
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    cmd->waiting = g->renderer_blocked;
    if (cmd->waiting) {
        return;
    }

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;

    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
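/*
 * Fence callback, invoked by virglrenderer (from virgl_renderer_poll())
 * when the GPU reaches a fence.  A single call may retire several queued
 * commands: fences can be emitted out of order, so every command with a
 * fence_id at or below the signalled fence is completed here.
 */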
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
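/*
 * GL context callbacks handed to virglrenderer: thin wrappers around the
 * QEMU display layer (dpy_gl_ctx_*) so the renderer can create, switch
 * and destroy GL contexts on the console backing each scanout.
 */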
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}
static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
}
static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
}
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};
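/*
 * Periodic statistics dump, active when the device's stats option is
 * enabled: once per second, print the request counters to stderr and
 * reset them.
 */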
static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
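/*
 * virglrenderer completions are not interrupt driven; while commands or
 * fences are outstanding, a 10ms timer keeps polling the renderer and
 * re-running the command queue.
 */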
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}
void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
        dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0);
    }
}
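/*
 * One-time renderer setup.  A non-zero return from virgl_renderer_init()
 * is propagated to the caller; the fence-poll (and optional stats) timers
 * are only created once the renderer is up.
 */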
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}
#endif /* CONFIG_VIRGL */