/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#ifdef CONFIG_VIRGL

#include <virglrenderer.h>
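
/* GL callback table for virglrenderer; defined at the end of this file. */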
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;
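
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a host resource for a 2D
 * framebuffer.  The fixed target/bind/depth values below are the standard
 * settings for a plain 2D texture; Y_0_TOP compensates for the flipped
 * Y axis between guest framebuffers and GL.
 */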
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}
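
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: create a host resource with the
 * target/bind/size parameters supplied by the guest driver.
 */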
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
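
/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: detach any guest backing pages before
 * dropping the renderer's reference, so the iovec mapping is not leaked.
 */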
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}
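
/* Rendering context lifecycle (VIRTIO_GPU_CMD_CTX_CREATE / _DESTROY). */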
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
}
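
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: push updates to every scanout that is
 * currently showing this resource.
 */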
static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}
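
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource's backing texture to a
 * scanout, or disable the scanout when resource id 0 / an empty rect is
 * given.
 */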
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(g->scanout[ss.scanout_id].con, info.tex_id,
                               info.flags & 1 /* FIXME: Y_0_TOP */,
                               info.width, info.height,
                               ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout_disable(g->scanout[ss.scanout_id].con);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
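
/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: copy the command stream out of the guest's
 * scatter-gather list and hand it to the renderer (cs.size is in bytes,
 * virglrenderer expects a dword count).
 */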
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
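
/*
 * Transfer commands: copy data between the guest's backing pages and the
 * host resource.  The 2D variant always targets context 0, level 0.
 */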
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
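
/*
 * Backing attach/detach: map (or unmap) the guest pages that provide a
 * resource's backing store as an iovec for the renderer.
 */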
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, &att_rb, cmd, NULL, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
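
/* Capability set queries: index 0 reports VIRGL, index 1 VIRGL2. */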
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
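
/*
 * Main control-queue dispatcher.  Unfenced commands are acknowledged
 * immediately; fenced commands are answered from virgl_write_fence() once
 * the renderer signals the fence.
 */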
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    cmd->waiting = g->renderer_blocked;
    if (cmd->waiting) {
        return;
    }

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
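
/*
 * .write_fence callback: virglrenderer calls this as fences complete, and
 * we retire every queued command whose fence_id is at or below the
 * signalled fence.
 */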
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
}
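
/* Callback table registered with virglrenderer in virtio_gpu_virgl_init(). */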
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version            = 1,
    .write_fence        = virgl_write_fence,
    .create_gl_context  = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current       = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
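
/*
 * Poll the renderer for fence completions and keep rearming a 10 ms timer
 * while the command or fence queue is non-empty.
 */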
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
        dpy_gl_scanout_disable(g->scanout[i].con);
    }
}

void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}
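
/* One-time renderer setup: register the callbacks, arm the poll/stats timers. */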
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}

int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

#endif /* CONFIG_VIRGL */