/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-iommu.h"

#include <glib/gmem.h>
#include <rutabaga_gfx/rutabaga_gfx_ffi.h>

#define CHECK(condition, cmd)                                                 \
    do {                                                                      \
        if (!(condition)) {                                                   \
            error_report("CHECK failed in %s() %s:" "%d", __func__,           \
                         __FILE__, __LINE__);                                 \
            (cmd)->error = VIRTIO_GPU_RESP_ERR_UNSPEC;                        \
            return;                                                           \
        }                                                                     \
    } while (0)

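/*
 * A rutabaga fence may be signalled on a rutabaga-owned thread; the aio data
 * below is what gets bounced to the QEMU main thread so the fence response
 * can be completed there (see virtio_gpu_rutabaga_fence_cb further down).
 */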
struct rutabaga_aio_data {
    struct VirtIOGPURutabaga *vr;
    struct rutabaga_fence fence;
};

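/*
 * Synchronize the guest cursor resource into the QEMU cursor buffer with a
 * rutabaga transfer read, so the UI shows the most recent cursor pixels.
 */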
static void
virtio_gpu_rutabaga_update_cursor(VirtIOGPU *g, struct virtio_gpu_scanout *s,
                                  uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    struct rutabaga_transfer transfer = { 0 };
    struct iovec transfer_iovec;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (res->width != s->current_cursor->width ||
        res->height != s->current_cursor->height) {
        return;
    }

    transfer.x = 0;
    transfer.y = 0;
    transfer.z = 0;
    transfer.w = res->width;
    transfer.h = res->height;
    transfer.d = 1;

    transfer_iovec.iov_base = s->current_cursor->data;
    transfer_iovec.iov_len = res->width * res->height * 4;

    rutabaga_resource_transfer_read(vr->rutabaga, 0,
                                    resource_id, &transfer,
                                    &transfer_iovec);
}

static void
virtio_gpu_rutabaga_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);
    virtio_gpu_process_cmdq(g);
}

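/*
 * rutabaga has no separate 2D resource type: a 2D resource is created as a
 * single-layer 3D texture. The bind value (1 << 1) below is assumed to be
 * the render-target bind flag (VIRGL_BIND_RENDER_TARGET in virglrenderer
 * terms).
 */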
static void
rutabaga_cmd_create_resource_2d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_create_3d rc_3d = { 0 };
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    rc_3d.target = 2;
    rc_3d.format = c2d.format;
    rc_3d.bind = (1 << 1);
    rc_3d.width = c2d.width;
    rc_3d.height = c2d.height;
    rc_3d.depth = 1;
    rc_3d.array_size = 1;
    rc_3d.last_level = 0;
    rc_3d.nr_samples = 0;
    rc_3d.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;

    result = rutabaga_resource_create_3d(vr->rutabaga, c2d.resource_id, &rc_3d);
    CHECK(!result, cmd);

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
rutabaga_cmd_create_resource_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_create_3d rc_3d = { 0 };
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_3d c3d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(c3d);

    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    rc_3d.target = c3d.target;
    rc_3d.format = c3d.format;
    rc_3d.bind = c3d.bind;
    rc_3d.width = c3d.width;
    rc_3d.height = c3d.height;
    rc_3d.depth = c3d.depth;
    rc_3d.array_size = c3d.array_size;
    rc_3d.last_level = c3d.last_level;
    rc_3d.nr_samples = c3d.nr_samples;
    rc_3d.flags = c3d.flags;

    result = rutabaga_resource_create_3d(vr->rutabaga, c3d.resource_id, &rc_3d);
    CHECK(!result, cmd);

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c3d.width;
    res->height = c3d.height;
    res->format = c3d.format;
    res->resource_id = c3d.resource_id;

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

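/*
 * Common unref path, used both by the RESOURCE_UNREF command and by the
 * VirtIOGPUClass resource_destroy hook (see class_init below); errors are
 * reported through *errp rather than through a command response.
 */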
static void
virtio_gpu_rutabaga_resource_unref(VirtIOGPU *g,
                                   struct virtio_gpu_simple_resource *res,
                                   Error **errp)
{
    int32_t result;
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    result = rutabaga_resource_unref(vr->rutabaga, res->resource_id);
    if (result) {
        error_setg_errno(errp, -result,
                         "%s: rutabaga_resource_unref returned %"PRIi32
                         " for resource_id = %"PRIu32, __func__, result,
                         res->resource_id);
    }

    if (res->image) {
        pixman_image_unref(res->image);
    }

    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
rutabaga_cmd_resource_unref(VirtIOGPU *g,
                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;
    Error *local_err = NULL;

    VIRTIO_GPU_FILL_CMD(unref);

    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    CHECK(res, cmd);

    virtio_gpu_rutabaga_resource_unref(g, res, &local_err);
    if (local_err) {
        error_report_err(local_err);
        /* local_err was freed, do not reuse it. */
        local_err = NULL;
    }
}

static void
rutabaga_cmd_context_create(VirtIOGPU *g,
                            struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_create cc;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    result = rutabaga_context_create(vr->rutabaga, cc.hdr.ctx_id,
                                     cc.context_init, cc.debug_name, cc.nlen);
    CHECK(!result, cmd);
}

static void
rutabaga_cmd_context_destroy(VirtIOGPU *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_destroy cd;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    result = rutabaga_context_destroy(vr->rutabaga, cd.hdr.ctx_id);
    CHECK(!result, cmd);
}

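/*
 * Flush reads the rendered contents back from rutabaga into the resource's
 * Pixman image and then pushes the result to the scanout's display surface.
 */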
static void
rutabaga_cmd_resource_flush(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result, i;
    struct virtio_gpu_scanout *scanout = NULL;
    struct virtio_gpu_simple_resource *res;
    struct rutabaga_transfer transfer = { 0 };
    struct iovec transfer_iovec;
    struct virtio_gpu_resource_flush rf;
    bool found = false;

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
    if (vr->headless) {
        return;
    }

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    CHECK(res, cmd);

    for (i = 0; i < vb->conf.max_outputs; i++) {
        scanout = &vb->scanout[i];
        if (i == res->scanout_bitmask) {
            found = true;
            break;
        }
    }

    if (!found) {
        return;
    }

    transfer.x = 0;
    transfer.y = 0;
    transfer.z = 0;
    transfer.w = res->width;
    transfer.h = res->height;
    transfer.d = 1;

    transfer_iovec.iov_base = pixman_image_get_data(res->image);
    transfer_iovec.iov_len = res->width * res->height * 4;

    result = rutabaga_resource_transfer_read(vr->rutabaga, 0,
                                             rf.resource_id, &transfer,
                                             &transfer_iovec);
    CHECK(!result, cmd);
    dpy_gfx_update_full(scanout->con);
}

static void
rutabaga_cmd_set_scanout(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout = NULL;
    struct virtio_gpu_set_scanout ss;

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
    if (vr->headless) {
        return;
    }

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    CHECK(ss.scanout_id < VIRTIO_GPU_MAX_SCANOUTS, cmd);
    scanout = &vb->scanout[ss.scanout_id];

    if (ss.resource_id == 0) {
        dpy_gfx_replace_surface(scanout->con, NULL);
        dpy_gl_scanout_disable(scanout->con);
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    CHECK(res, cmd);

    if (!res->image) {
        pixman_format_code_t pformat;
        pformat = virtio_gpu_get_pixman_format(res->format);
        CHECK(pformat, cmd);

        res->image = pixman_image_create_bits(pformat,
                                              res->width,
                                              res->height,
                                              NULL, 0);
        CHECK(res->image, cmd);
        pixman_image_ref(res->image);
    }

    vb->enable = 1;

    /* realloc the surface ptr */
    scanout->ds = qemu_create_displaysurface_pixman(res->image);

    dpy_gfx_replace_surface(scanout->con, NULL);
    dpy_gfx_replace_surface(scanout->con, scanout->ds);
    res->scanout_bitmask = ss.scanout_id;
}

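/*
 * SUBMIT_3D: the guest command stream is copied out of the request's
 * scatter-gather list into one contiguous bounce buffer before being handed
 * to rutabaga, since rutabaga_command carries a single pointer plus size.
 */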
static void
rutabaga_cmd_submit_3d(VirtIOGPU *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    size_t s;
    struct virtio_gpu_cmd_submit cs;
    struct rutabaga_command rutabaga_cmd = { 0 };
    g_autofree uint8_t *buf = NULL;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_new0(uint8_t, cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    CHECK(s == cs.size, cmd);

    rutabaga_cmd.ctx_id = cs.hdr.ctx_id;
    rutabaga_cmd.cmd = buf;
    rutabaga_cmd.cmd_size = cs.size;

    result = rutabaga_submit_command(vr->rutabaga, &rutabaga_cmd);
    CHECK(!result, cmd);
}

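/*
 * Transfer commands translate the virtio-gpu rect/box into a
 * rutabaga_transfer. The 2D variant is host-owned (ctx_id 0) and only
 * carries x/y/width/height; the 3D variants pass the submitting context.
 */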
static void
rutabaga_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_transfer transfer = { 0 };
    struct virtio_gpu_transfer_to_host_2d t2d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    transfer.x = t2d.r.x;
    transfer.y = t2d.r.y;
    transfer.z = 0;
    transfer.w = t2d.r.width;
    transfer.h = t2d.r.height;
    transfer.d = 1;

    result = rutabaga_resource_transfer_write(vr->rutabaga, 0, t2d.resource_id,
                                              &transfer);
    CHECK(!result, cmd);
}

static void
rutabaga_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_transfer transfer = { 0 };
    struct virtio_gpu_transfer_host_3d t3d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    transfer.x = t3d.box.x;
    transfer.y = t3d.box.y;
    transfer.z = t3d.box.z;
    transfer.w = t3d.box.w;
    transfer.h = t3d.box.h;
    transfer.d = t3d.box.d;
    transfer.level = t3d.level;
    transfer.stride = t3d.stride;
    transfer.layer_stride = t3d.layer_stride;
    transfer.offset = t3d.offset;

    result = rutabaga_resource_transfer_write(vr->rutabaga, t3d.hdr.ctx_id,
                                              t3d.resource_id, &transfer);
    CHECK(!result, cmd);
}

static void
rutabaga_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_transfer transfer = { 0 };
    struct virtio_gpu_transfer_host_3d t3d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(t3d.resource_id);

    transfer.x = t3d.box.x;
    transfer.y = t3d.box.y;
    transfer.z = t3d.box.z;
    transfer.w = t3d.box.w;
    transfer.h = t3d.box.h;
    transfer.d = t3d.box.d;
    transfer.level = t3d.level;
    transfer.stride = t3d.stride;
    transfer.layer_stride = t3d.layer_stride;
    transfer.offset = t3d.offset;

    result = rutabaga_resource_transfer_read(vr->rutabaga, t3d.hdr.ctx_id,
                                             t3d.resource_id, &transfer, NULL);
    CHECK(!result, cmd);
}

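/*
 * Backing attach/detach: guest pages are collected into an iovec array via
 * virtio_gpu_create_mapping_iov() and handed to rutabaga, which keeps using
 * them until the backing is detached again.
 */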
static void
rutabaga_cmd_attach_backing(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    struct rutabaga_iovecs vecs = { 0 };
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing att_rb;
    int ret;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    res = virtio_gpu_find_resource(g, att_rb.resource_id);
    CHECK(res, cmd);
    CHECK(!res->iov, cmd);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res->iov, &res->iov_cnt);
    CHECK(!ret, cmd);

    vecs.iovecs = res->iov;
    vecs.num_iovecs = res->iov_cnt;

    ret = rutabaga_resource_attach_backing(vr->rutabaga, att_rb.resource_id,
                                           &vecs);
    if (ret != 0) {
        virtio_gpu_cleanup_mapping(g, res);
    }

    CHECK(!ret, cmd);
}

static void
rutabaga_cmd_detach_backing(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach_rb;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    res = virtio_gpu_find_resource(g, detach_rb.resource_id);
    CHECK(res, cmd);

    rutabaga_resource_detach_backing(vr->rutabaga,
                                     detach_rb.resource_id);

    virtio_gpu_cleanup_mapping(g, res);
}

static void
rutabaga_cmd_ctx_attach_resource(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_resource att_res;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    result = rutabaga_context_attach_resource(vr->rutabaga, att_res.hdr.ctx_id,
                                              att_res.resource_id);
    CHECK(!result, cmd);
}

static void
rutabaga_cmd_ctx_detach_resource(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_resource det_res;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    result = rutabaga_context_detach_resource(vr->rutabaga, det_res.hdr.ctx_id,
                                              det_res.resource_id);
    CHECK(!result, cmd);
}

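/*
 * Capset queries: the guest indexes capsets by capset_index for the info
 * query, but requests capset data by capset_id, hence the lookup loop in
 * rutabaga_cmd_get_capset() below.
 */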
static void
rutabaga_cmd_get_capset_info(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(info);

    /* Zero the response so no stack garbage leaks to the guest. */
    memset(&resp, 0, sizeof(resp));

    result = rutabaga_get_capset_info(vr->rutabaga, info.capset_index,
                                      &resp.capset_id, &resp.capset_max_version,
                                      &resp.capset_max_size);
    CHECK(!result, cmd);

    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void
rutabaga_cmd_get_capset(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t capset_size, capset_version;
    uint32_t current_id, i;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(gc);
    for (i = 0; i < vr->num_capsets; i++) {
        result = rutabaga_get_capset_info(vr->rutabaga, i,
                                          &current_id, &capset_version,
                                          &capset_size);
        CHECK(!result, cmd);

        if (current_id == gc.capset_id) {
            break;
        }
    }

    CHECK(i < vr->num_capsets, cmd);

    resp = g_malloc0(sizeof(*resp) + capset_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    rutabaga_get_capset(vr->rutabaga, gc.capset_id, gc.capset_version,
                        resp->capset_data, capset_size);

    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + capset_size);
    g_free(resp);
}

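/*
 * Blob resources: a blob may be backed by guest memory, host memory
 * (VIRTIO_GPU_BLOB_MEM_HOST3D), or both. Only non-HOST3D blobs need the
 * guest iovec mapping created below.
 */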
static void
rutabaga_cmd_resource_create_blob(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    int result;
    struct rutabaga_iovecs vecs = { 0 };
    g_autofree struct virtio_gpu_simple_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct rutabaga_create_blob rc_blob = { 0 };

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    CHECK(cblob.resource_id != 0, cmd);

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        result = virtio_gpu_create_mapping_iov(g, cblob.nr_entries,
                                               sizeof(cblob), cmd, &res->addrs,
                                               &res->iov, &res->iov_cnt);
        CHECK(!result, cmd);
    }

    rc_blob.blob_id = cblob.blob_id;
    rc_blob.blob_mem = cblob.blob_mem;
    rc_blob.blob_flags = cblob.blob_flags;
    rc_blob.size = cblob.size;

    vecs.iovecs = res->iov;
    vecs.num_iovecs = res->iov_cnt;

    result = rutabaga_resource_create_blob(vr->rutabaga, cblob.hdr.ctx_id,
                                           cblob.resource_id, &rc_blob, &vecs,
                                           NULL);

    if (result && cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        virtio_gpu_cleanup_mapping(g, res);
    }

    CHECK(!result, cmd);

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    res = NULL; /* ownership moved to reslist; stop g_autofree from freeing */
}

static void
rutabaga_cmd_resource_map_blob(VirtIOGPU *g,
                               struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    uint32_t map_info = 0;
    uint32_t slot = 0;
    struct virtio_gpu_simple_resource *res;
    struct rutabaga_mapping mapping = { 0 };
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_resp_map_info resp = { 0 };

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(mblob);

    CHECK(mblob.resource_id != 0, cmd);

    res = virtio_gpu_find_resource(g, mblob.resource_id);
    CHECK(res, cmd);

    result = rutabaga_resource_map_info(vr->rutabaga, mblob.resource_id,
                                        &map_info);
    CHECK(!result, cmd);

    /*
     * RUTABAGA_MAP_ACCESS_* flags are not part of the virtio-gpu spec, but do
     * exist to potentially allow the hypervisor to restrict write access to
     * memory. QEMU does not need to use this functionality at the moment.
     */
    resp.map_info = map_info & RUTABAGA_MAP_CACHE_MASK;

    result = rutabaga_resource_map(vr->rutabaga, mblob.resource_id, &mapping);
    CHECK(!result, cmd);

    /*
     * There is a small risk of the MemoryRegion dereferencing the pointer
     * after rutabaga unmaps it. Please see the discussion here:
     *
     * https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg05141.html
     *
     * It is highly unlikely to happen in practice and doesn't affect known
     * use cases. However, it should be fixed and is noted here for posterity.
     */
    for (slot = 0; slot < MAX_SLOTS; slot++) {
        if (vr->memory_regions[slot].used) {
            continue;
        }

        MemoryRegion *mr = &(vr->memory_regions[slot].mr);
        memory_region_init_ram_ptr(mr, OBJECT(vr), "blob", mapping.size,
                                   mapping.ptr);
        memory_region_add_subregion(&vb->hostmem, mblob.offset, mr);
        vr->memory_regions[slot].resource_id = mblob.resource_id;
        vr->memory_regions[slot].used = 1;
        break;
    }

    if (slot >= MAX_SLOTS) {
        result = rutabaga_resource_unmap(vr->rutabaga, mblob.resource_id);
        CHECK(!result, cmd);
    }

    CHECK(slot < MAX_SLOTS, cmd);

    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void
rutabaga_cmd_resource_unmap_blob(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    uint32_t slot = 0;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unmap_blob ublob;

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(ublob);

    CHECK(ublob.resource_id != 0, cmd);

    res = virtio_gpu_find_resource(g, ublob.resource_id);
    CHECK(res, cmd);

    for (slot = 0; slot < MAX_SLOTS; slot++) {
        if (vr->memory_regions[slot].resource_id != ublob.resource_id) {
            continue;
        }

        MemoryRegion *mr = &(vr->memory_regions[slot].mr);
        memory_region_del_subregion(&vb->hostmem, mr);

        vr->memory_regions[slot].resource_id = 0;
        vr->memory_regions[slot].used = 0;
        break;
    }

    CHECK(slot < MAX_SLOTS, cmd);
    result = rutabaga_resource_unmap(vr->rutabaga, res->resource_id);
    CHECK(!result, cmd);
}

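/*
 * Central command dispatch, installed as the VirtIOGPUClass process_cmd hook.
 * Fenced commands are not completed here; completion happens in the fence
 * callback path below once rutabaga signals the fence.
 */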
static void
virtio_gpu_rutabaga_process_cmd(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct rutabaga_fence fence = { 0 };
    int32_t result;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        rutabaga_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        rutabaga_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        rutabaga_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        rutabaga_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        rutabaga_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        rutabaga_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        rutabaga_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        rutabaga_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        rutabaga_cmd_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        rutabaga_cmd_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        rutabaga_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        rutabaga_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        rutabaga_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        rutabaga_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        rutabaga_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        rutabaga_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        rutabaga_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        rutabaga_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        rutabaga_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        rutabaga_cmd_resource_unmap_blob(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        error_report("%s: ctrl 0x%x, error 0x%x", __func__,
                     cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    fence.flags = cmd->cmd_hdr.flags;
    fence.ctx_id = cmd->cmd_hdr.ctx_id;
    fence.fence_id = cmd->cmd_hdr.fence_id;
    fence.ring_idx = cmd->cmd_hdr.ring_idx;

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);

    result = rutabaga_create_fence(vr->rutabaga, &fence);
    CHECK(!result, cmd);
}

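/*
 * Runs on the main thread (scheduled by virtio_gpu_rutabaga_fence_cb) and
 * retires every queued command whose fence is covered by the signalled
 * fence, honouring per-ring (context specific) timelines.
 */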
static void
virtio_gpu_rutabaga_aio_cb(void *opaque)
{
    struct rutabaga_aio_data *data = opaque;
    VirtIOGPU *g = VIRTIO_GPU(data->vr);
    struct rutabaga_fence fence_data = data->fence;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    uint32_t signaled_ctx_specific = fence_data.flags &
                                     RUTABAGA_FLAG_INFO_RING_IDX;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * Only compare fences on the same timeline, due to context
         * specific timelines.
         */
        uint32_t target_ctx_specific = cmd->cmd_hdr.flags &
                                       RUTABAGA_FLAG_INFO_RING_IDX;

        if (signaled_ctx_specific != target_ctx_specific) {
            continue;
        }

        if (signaled_ctx_specific &&
            (cmd->cmd_hdr.ring_idx != fence_data.ring_idx)) {
            continue;
        }

        if (cmd->cmd_hdr.fence_id > fence_data.fence_id) {
            continue;
        }

        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
    }

    g_free(data);
}

static void
virtio_gpu_rutabaga_fence_cb(uint64_t user_data,
                             const struct rutabaga_fence *fence)
{
    struct rutabaga_aio_data *data;
    VirtIOGPU *g = (VirtIOGPU *)user_data;
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    /*
     * gfxstream, cross-domain, and even newer versions of virglrenderer
     * (see VIRGL_RENDERER_ASYNC_FENCE_CB) like to signal fence completion
     * on threads ("callback threads") that are different from the thread
     * that processes the command queue ("main thread").
     *
     * crosvm and other virtio-gpu 1.1 implementations enable callback threads
     * via locking. However, on QEMU a deadlock is observed if
     * virtio_gpu_ctrl_response_nodata(..) [used in the fence callback] is used
     * from a thread that is not the main thread.
     *
     * The reason is QEMU's internal locking is designed to work with QEMU
     * threads (see rcu_register_thread()) and not generic C/C++/Rust threads.
     * For now, we can work around this by scheduling the return of the
     * fence descriptors on the main thread.
     */

    data = g_new0(struct rutabaga_aio_data, 1);
    data->vr = vr;
    data->fence = *fence;
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            virtio_gpu_rutabaga_aio_cb,
                            data);
}

static void
virtio_gpu_rutabaga_debug_cb(uint64_t user_data,
                             const struct rutabaga_debug *debug)
{
    switch (debug->debug_type) {
    case RUTABAGA_DEBUG_ERROR:
        error_report("%s", debug->message);
        break;
    case RUTABAGA_DEBUG_WARN:
        warn_report("%s", debug->message);
        break;
    case RUTABAGA_DEBUG_INFO:
        info_report("%s", debug->message);
        break;
    default:
        error_report("unknown debug type: %u", debug->debug_type);
    }
}

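/*
 * Builds the rutabaga instance from the device properties: WSI mode, enabled
 * capsets, fence/debug callbacks, and (for cross-domain) the Wayland channel.
 */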
static bool virtio_gpu_rutabaga_init(VirtIOGPU *g, Error **errp)
{
    int result;
    struct rutabaga_builder builder = { 0 };
    struct rutabaga_channel channel = { 0 };
    struct rutabaga_channels channels = { 0 };

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    builder.wsi = RUTABAGA_WSI_SURFACELESS;
    /*
     * Currently, if WSI is specified, the only valid strings are "surfaceless"
     * or "headless". Surfaceless doesn't create a native window surface, but
     * does copy from the render target to the Pixman buffer if a virtio-gpu
     * 2D hypercall is issued. Surfaceless is the default.
     *
     * Headless is like surfaceless, but doesn't copy to the Pixman buffer. The
     * use case is automated testing environments where there is no need to
     * view the results.
     *
     * In the future, more performant virtio-gpu 2D UI integration may be added.
     */
    if (vr->wsi) {
        if (g_str_equal(vr->wsi, "surfaceless")) {
            vr->headless = false;
        } else if (g_str_equal(vr->wsi, "headless")) {
            vr->headless = true;
        } else {
            error_setg(errp, "invalid wsi option selected");
            return false;
        }
    }

    builder.fence_cb = virtio_gpu_rutabaga_fence_cb;
    builder.debug_cb = virtio_gpu_rutabaga_debug_cb;
    builder.capset_mask = vr->capset_mask;
    builder.user_data = (uint64_t)g;

    /*
     * If the user doesn't specify the wayland socket path, we try to infer
     * the socket via a process similar to the one used by libwayland.
     * libwayland does the following:
     *
     * 1) If $WAYLAND_DISPLAY is set, attempt to connect to
     *    $XDG_RUNTIME_DIR/$WAYLAND_DISPLAY
     * 2) Otherwise, attempt to connect to $XDG_RUNTIME_DIR/wayland-0
     * 3) Otherwise, don't pass a wayland socket to rutabaga. If a guest
     *    wayland proxy is launched, it will fail to work.
     */
    channel.channel_type = RUTABAGA_CHANNEL_TYPE_WAYLAND;
    g_autofree gchar *path = NULL;
    if (!vr->wayland_socket_path) {
        const gchar *runtime_dir = g_get_user_runtime_dir();
        const gchar *display = g_getenv("WAYLAND_DISPLAY");
        if (!display) {
            display = "wayland-0";
        }

        if (runtime_dir) {
            path = g_build_filename(runtime_dir, display, NULL);
            channel.channel_name = path;
        }
    } else {
        channel.channel_name = vr->wayland_socket_path;
    }

    if ((builder.capset_mask & (1 << RUTABAGA_CAPSET_CROSS_DOMAIN))) {
        if (channel.channel_name) {
            channels.channels = &channel;
            channels.num_channels = 1;
            builder.channels = &channels;
        }
    }

    result = rutabaga_init(&builder, &vr->rutabaga);
    if (result) {
        error_setg_errno(errp, -result, "Failed to init rutabaga");
        return false;
    }

    return true;
}

static int virtio_gpu_rutabaga_get_num_capsets(VirtIOGPU *g)
{
    int result;
    uint32_t num_capsets;
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    result = rutabaga_get_num_capsets(vr->rutabaga, &num_capsets);
    if (result) {
        error_report("Failed to get capsets");
        return 0;
    }
    vr->num_capsets = num_capsets;

    return num_capsets;
}

static void virtio_gpu_rutabaga_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_rutabaga_realize(DeviceState *qdev, Error **errp)
{
    int num_capsets;
    VirtIOGPUBase *bdev = VIRTIO_GPU_BASE(qdev);
    VirtIOGPU *gpudev = VIRTIO_GPU(qdev);

#if HOST_BIG_ENDIAN
    error_setg(errp, "rutabaga is not supported on bigendian platforms");
    return;
#endif

    if (!virtio_gpu_rutabaga_init(gpudev, errp)) {
        return;
    }

    num_capsets = virtio_gpu_rutabaga_get_num_capsets(gpudev);
    if (!num_capsets) {
        return;
    }

    bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED);
    bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED);
    bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED);

    bdev->virtio_config.num_capsets = num_capsets;
    virtio_gpu_device_realize(qdev, errp);
}

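/*
 * Device properties. Each capset property sets one bit in capset_mask.
 * A hypothetical invocation enabling the cross-domain capset might look
 * like the following (paths and capsets here are illustrative only):
 *
 *   qemu-system-x86_64 ... -device virtio-gpu-rutabaga,cross-domain=on,\
 *       wayland-socket-path=/run/user/1000/wayland-0
 */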
static Property virtio_gpu_rutabaga_properties[] = {
    DEFINE_PROP_BIT64("gfxstream-vulkan", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_VULKAN, false),
    DEFINE_PROP_BIT64("cross-domain", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_CROSS_DOMAIN, false),
    DEFINE_PROP_BIT64("x-gfxstream-gles", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_GLES, false),
    DEFINE_PROP_BIT64("x-gfxstream-composer", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_COMPOSER, false),
    DEFINE_PROP_STRING("wayland-socket-path", VirtIOGPURutabaga,
                       wayland_socket_path),
    DEFINE_PROP_STRING("wsi", VirtIOGPURutabaga, wsi),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vbc = VIRTIO_GPU_BASE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);

    vbc->gl_flushed = virtio_gpu_rutabaga_gl_flushed;
    vgc->handle_ctrl = virtio_gpu_rutabaga_handle_ctrl;
    vgc->process_cmd = virtio_gpu_rutabaga_process_cmd;
    vgc->update_cursor_data = virtio_gpu_rutabaga_update_cursor;
    vgc->resource_destroy = virtio_gpu_rutabaga_resource_unref;
    vdc->realize = virtio_gpu_rutabaga_realize;
    device_class_set_props(dc, virtio_gpu_rutabaga_properties);
}

static const TypeInfo virtio_gpu_rutabaga_info[] = {
    {
        .name = TYPE_VIRTIO_GPU_RUTABAGA,
        .parent = TYPE_VIRTIO_GPU,
        .instance_size = sizeof(VirtIOGPURutabaga),
        .class_init = virtio_gpu_rutabaga_class_init,
    },
};

DEFINE_TYPES(virtio_gpu_rutabaga_info)

module_obj(TYPE_VIRTIO_GPU_RUTABAGA);
module_kconfig(VIRTIO_GPU);
module_dep("hw-display-virtio-gpu");