/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "qapi/error.h"

#ifdef CONFIG_VIRGL

#include <virglrenderer.h>
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;
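/*
 * The command handlers below unpack a request from the control queue
 * (VIRTIO_GPU_FILL_CMD) and forward it to virglrenderer.
 */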
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}
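/*
 * Same as above, but for fully-described 3D resources: target, mip levels,
 * array size and sample count all come from the guest request.
 */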
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
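/* Detach any guest backing pages before dropping the virgl reference. */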
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}
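/* Rendering contexts are created and destroyed per guest ctx_id. */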
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}
static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
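/* Report a dirty rectangle to the console's GL update hook. */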
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
}
static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}
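/*
 * Bind a resource to a scanout: look up the backing texture via
 * virglrenderer and hand it to the display as a GL scanout texture;
 * a zero resource_id (or an empty rect) disables the scanout instead.
 */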
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        qemu_console_resize(g->scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(g->scanout[ss.scanout_id].con, info.tex_id,
                               info.flags & 1 /* FIXME: Y_0_TOP */,
                               info.width, info.height,
                               ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout_disable(g->scanout[ss.scanout_id].con);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
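/*
 * Copy the guest's 3D command stream out of the virtqueue and submit it
 * to virglrenderer (the submit interface takes a dword count).
 */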
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
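/*
 * Transfers copy data between the attached guest backing store and the
 * host resource; the 2D variant synthesizes a box from the 2D rect.
 */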
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}
static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}
static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
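/*
 * Map the guest pages described by the request into an iovec array and
 * attach them to the virgl resource (and the reverse on detach).
 */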
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, NULL, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(res_iovs, att_rb.nr_entries);
    }
}
static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
}
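/* Associate or disassociate a resource with a rendering context. */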
static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}
static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
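/* Capability sets: only the VIRGL capset is advertised (capset index 0). */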
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
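/*
 * Main control-queue dispatch: route each command to its handler, then
 * complete it with a response, either immediately or via a fence when
 * VIRTIO_GPU_FLAG_FENCE is set.
 */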
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    cmd->waiting = g->renderer_blocked;
    if (cmd->waiting) {
        return;
    }

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
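/*
 * Called back by virglrenderer once GPU work up to 'fence' has completed;
 * retire every queued command whose fence has been reached.
 */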
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
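/*
 * GL context callbacks: virglrenderer delegates OpenGL context creation,
 * destruction and make-current to QEMU's display layer.
 */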
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}
static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
}
static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
}
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};
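/* Periodically print request/inflight counters when stats are enabled. */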
static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}
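/* Reset: turn off GL scanout on every output. */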
void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
        dpy_gl_scanout_disable(g->scanout[i].con);
    }
}
void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}
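/*
 * Register our callbacks with virglrenderer and arm the fence-poll
 * (and optional stats) timers.
 */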
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

    return 0;
}
#endif /* CONFIG_VIRGL */