/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#ifdef CONFIG_VIRGL

#include "virglrenderer.h"

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;

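/*
 * Command handlers.  Each handler below follows the same pattern:
 * VIRTIO_GPU_FILL_CMD() copies the guest request out of the virtqueue
 * element into a local struct, a trace point records it, and the request
 * is forwarded to virglrenderer.
 */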
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;                   /* PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = (1 << 1);              /* VIRGL_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    /* 2D resources are single-layer, single-level, single-sample */
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

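/*
 * Unlike the 2D variant above, which hard-codes a single-layer render
 * target, the 3D variant passes the guest-supplied texture parameters
 * (target, array size, mip levels, sample count) through unchanged.
 */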
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_unref(unref.resource_id);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

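/* Push a damage rectangle to the display, if the scanout has a console. */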
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

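/*
 * Wire a resource up to a scanout: the GL texture backing the resource
 * (looked up with virgl_renderer_resource_get_info()) is handed to the
 * display with dpy_gl_scanout().  A resource id of 0 disables the scanout.
 */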
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, info.tex_id,
                       info.flags & 1 /* FIXME: Y_0_TOP */,
                       ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, 0, false,
                       0, 0, 0, 0);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

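/*
 * Execute a guest command stream: the buffer is copied out of the guest's
 * scatter-gather list and submitted to virglrenderer as 32-bit words.
 */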
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

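/*
 * Transfer commands move data between a resource's guest backing pages
 * (the iovecs attached further below) and the host-side GL resource.
 */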
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0, /* ctx 0 */
                                      0, /* level */
                                      0, /* stride */
                                      0, /* layer_stride */
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
}

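/*
 * Context attach/detach simply forward to virglrenderer; access control
 * is still a TODO (see the dispatch switch below).
 */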
static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

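/*
 * The capset response is sized at runtime: a header plus max_size bytes,
 * where max_size is queried from virglrenderer first.
 */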
static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    resp = g_malloc(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

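/*
 * Central dispatch: decode the common header, make context 0 current and
 * route the command to its handler.  Responses and fencing are handled
 * once, below the switch, after the handler returns.
 */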
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

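/*
 * Called back by virglrenderer (via virtio_gpu_3d_cbs.write_fence) once
 * the GL work preceding a fence has completed.  Retires every queued
 * command whose fence id is at or below the signalled fence.
 */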
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

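/*
 * GL context management is delegated back to QEMU's display layer; the
 * three callbacks below just translate between virglrenderer's and
 * QEMU's context handle types.
 */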
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

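/*
 * Fence completion is discovered by polling: virgl_renderer_poll() runs
 * any pending callbacks (virgl_write_fence() above), and the timer
 * re-arms every 10 ms while fenced commands are in flight.
 */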
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    if (g->inflight) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
        dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0);
    }
}

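/*
 * Register the callbacks with virglrenderer and create the fence-poll
 * timer; the stats timer is only set up when stats are enabled.
 */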
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}

#endif /* CONFIG_VIRGL */