/*
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "ui/egl-helpers.h"

#include <virglrenderer.h>
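
/*
 * With virglrenderer callback interface v4 the renderer can ask QEMU which
 * EGL display to use; the hook below simply hands back the display that the
 * UI layer already initialized.
 */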
#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif
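
/*
 * Command handlers: each virgl_cmd_* function below decodes one control
 * queue request from the guest and forwards it to virglrenderer.
 */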
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;            /* PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = (1 << 1);       /* VIRGL_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}
static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}
static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}
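
/*
 * Bind a guest resource to a scanout: look up the renderer texture backing
 * the resource and hand it to the display backend, or disable the scanout
 * when the guest passes resource id 0 or an empty rectangle.
 */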
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

#ifdef HAVE_VIRGL_D3D_INFO_EXT
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }

    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}
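
/*
 * SUBMIT_3D: copy the guest's command stream out of the request's iovec and
 * hand it to virglrenderer for the given rendering context.
 */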
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
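
/*
 * Transfer commands copy data between the guest-supplied backing store and
 * the host-side resource; the 2D variant goes through context 0 and a single
 * layer, the 3D variants carry the guest's context, level and strides.
 */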
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}
static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}
static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
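
/*
 * Backing-store management: translate the guest's scatter/gather entries
 * into host iovecs and attach them to (or detach them from) the virgl
 * resource.
 */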
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}
static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}
static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);
    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}
static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);
    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
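
/*
 * Top-level dispatcher for the control queue.  Decodes the command header,
 * runs the matching handler, and either completes the request immediately
 * or, for fenced commands, creates a virgl fence so the response is sent
 * once the renderer has finished.
 */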
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
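
/*
 * Called back by virglrenderer when a fence completes; retires every queued
 * command whose fence id is covered by the signalled fence.
 */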
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}
static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}
static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}
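
/*
 * GL context callbacks handed to virglrenderer; they map renderer context
 * requests onto the QEMU display/console GL helpers above.
 */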
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};
static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}
void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}
void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}
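
/*
 * One-time renderer setup: pick the callback version and renderer flags
 * supported by this build, initialize virglrenderer, and arm the fence
 * polling (and optional statistics) timers.
 */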
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

    return 0;
}
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}