/* SPDX-License-Identifier: GPL-2.0-or-later */
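/*
 * virtio-gpu implementation that offloads command processing to the
 * rutabaga_gfx library (gfxstream and cross-domain/Wayland backends).
 */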
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-iommu.h"

#include <glib/gmem.h>
#include <rutabaga_gfx/rutabaga_gfx_ffi.h>
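/*
 * Helper used by the command handlers below: on failure, record
 * VIRTIO_GPU_RESP_ERR_UNSPEC on the control command and return from the
 * calling handler.
 */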
#define CHECK(condition, cmd)                                                 \
    do {                                                                      \
        if (!(condition)) {                                                   \
            error_report("CHECK failed in %s() %s:" "%d", __func__,           \
                         __FILE__, __LINE__);                                 \
            (cmd)->error = VIRTIO_GPU_RESP_ERR_UNSPEC;                        \
            return;                                                           \
        }                                                                     \
    } while (0)
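/*
 * Fence-completion data handed from the rutabaga fence callback to the
 * bottom half that runs in QEMU's main AIO context.
 */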
struct rutabaga_aio_data {
    struct VirtIOGPURutabaga *vr;
    struct rutabaga_fence fence;
};
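/*
 * Cursor updates read the cursor resource back from rutabaga directly into
 * the QEMU cursor buffer; if the sizes do not match, the update is skipped.
 */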
static void
virtio_gpu_rutabaga_update_cursor(VirtIOGPU *g, struct virtio_gpu_scanout *s,
                                  uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    struct rutabaga_transfer transfer = { 0 };
    struct iovec transfer_iovec;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (res->width != s->current_cursor->width ||
        res->height != s->current_cursor->height) {
        return;
    }

    transfer.x = 0;
    transfer.y = 0;
    transfer.z = 0;
    transfer.w = res->width;
    transfer.h = res->height;
    transfer.d = 1;

    transfer_iovec.iov_base = s->current_cursor->data;
    transfer_iovec.iov_len = res->width * res->height * 4;

    rutabaga_resource_transfer_read(vr->rutabaga, 0,
                                    resource_id, &transfer,
                                    &transfer_iovec);
}
static void
virtio_gpu_rutabaga_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);
    virtio_gpu_process_cmdq(g);
}
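/*
 * 2D resource creation is translated into a rutabaga 3D create with a depth
 * of 1 and the Y_0_TOP flag, matching the virtio-gpu 2D convention.
 */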
static void
rutabaga_cmd_create_resource_2d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_create_3d rc_3d = { 0 };
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    rc_3d.target = 2;
    rc_3d.format = c2d.format;
    rc_3d.bind = (1 << 1);
    rc_3d.width = c2d.width;
    rc_3d.height = c2d.height;
    rc_3d.depth = 1;
    rc_3d.array_size = 1;
    rc_3d.last_level = 0;
    rc_3d.nr_samples = 0;
    rc_3d.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;

    result = rutabaga_resource_create_3d(vr->rutabaga, c2d.resource_id, &rc_3d);
    CHECK(!result, cmd);

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
static void
rutabaga_cmd_create_resource_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_create_3d rc_3d = { 0 };
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_3d c3d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(c3d);

    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    rc_3d.target = c3d.target;
    rc_3d.format = c3d.format;
    rc_3d.bind = c3d.bind;
    rc_3d.width = c3d.width;
    rc_3d.height = c3d.height;
    rc_3d.depth = c3d.depth;
    rc_3d.array_size = c3d.array_size;
    rc_3d.last_level = c3d.last_level;
    rc_3d.nr_samples = c3d.nr_samples;
    rc_3d.flags = c3d.flags;

    result = rutabaga_resource_create_3d(vr->rutabaga, c3d.resource_id, &rc_3d);
    CHECK(!result, cmd);

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c3d.width;
    res->height = c3d.height;
    res->format = c3d.format;
    res->resource_id = c3d.resource_id;

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
static void
rutabaga_cmd_resource_unref(VirtIOGPU *g,
                            struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(unref);

    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    CHECK(res, cmd);

    result = rutabaga_resource_unref(vr->rutabaga, unref.resource_id);
    CHECK(!result, cmd);

    if (res->image) {
        pixman_image_unref(res->image);
    }

    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}
static void
rutabaga_cmd_context_create(VirtIOGPU *g,
                            struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_create cc;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    result = rutabaga_context_create(vr->rutabaga, cc.hdr.ctx_id,
                                     cc.context_init, cc.debug_name, cc.nlen);
    CHECK(!result, cmd);
}
static void
rutabaga_cmd_context_destroy(VirtIOGPU *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_destroy cd;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    result = rutabaga_context_destroy(vr->rutabaga, cd.hdr.ctx_id);
    CHECK(!result, cmd);
}
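/*
 * Flush reads the rendered contents back from rutabaga into the resource's
 * Pixman image and then refreshes the scanout's display surface.
 */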
static void
rutabaga_cmd_resource_flush(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result, i;
    struct virtio_gpu_scanout *scanout = NULL;
    struct virtio_gpu_simple_resource *res;
    struct rutabaga_transfer transfer = { 0 };
    struct iovec transfer_iovec;
    struct virtio_gpu_resource_flush rf;
    bool found = false;

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
    if (vr->headless) {
        return;
    }

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    CHECK(res, cmd);

    for (i = 0; i < vb->conf.max_outputs; i++) {
        scanout = &vb->scanout[i];
        if (i == res->scanout_bitmask) {
            found = true;
            break;
        }
    }

    if (!found) {
        return;
    }

    transfer.x = 0;
    transfer.y = 0;
    transfer.z = 0;
    transfer.w = res->width;
    transfer.h = res->height;
    transfer.d = 1;

    transfer_iovec.iov_base = pixman_image_get_data(res->image);
    transfer_iovec.iov_len = res->width * res->height * 4;

    result = rutabaga_resource_transfer_read(vr->rutabaga, 0,
                                             rf.resource_id, &transfer,
                                             &transfer_iovec);
    CHECK(!result, cmd);
    dpy_gfx_update_full(scanout->con);
}
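/*
 * Scanout setup lazily allocates a Pixman image for the resource and points
 * the QEMU display surface at it; resource_id 0 disables the scanout.
 */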
static void
rutabaga_cmd_set_scanout(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout = NULL;
    struct virtio_gpu_set_scanout ss;

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
    if (vr->headless) {
        return;
    }

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    CHECK(ss.scanout_id < VIRTIO_GPU_MAX_SCANOUTS, cmd);
    scanout = &vb->scanout[ss.scanout_id];

    if (ss.resource_id == 0) {
        dpy_gfx_replace_surface(scanout->con, NULL);
        dpy_gl_scanout_disable(scanout->con);
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    CHECK(res, cmd);

    if (!res->image) {
        pixman_format_code_t pformat;
        pformat = virtio_gpu_get_pixman_format(res->format);
        CHECK(pformat, cmd);

        res->image = pixman_image_create_bits(pformat,
                                              res->width,
                                              res->height,
                                              NULL, 0);
        CHECK(res->image, cmd);
        pixman_image_ref(res->image);
    }

    vb->enable = 1;

    /* realloc the surface ptr */
    scanout->ds = qemu_create_displaysurface_pixman(res->image);
    dpy_gfx_replace_surface(scanout->con, NULL);
    dpy_gfx_replace_surface(scanout->con, scanout->ds);
    res->scanout_bitmask = ss.scanout_id;
}
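/*
 * Command submission copies the guest command stream out of the virtqueue
 * and forwards it to the rutabaga context identified by ctx_id.
 */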
static void
rutabaga_cmd_submit_3d(VirtIOGPU *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_cmd_submit cs;
    struct rutabaga_command rutabaga_cmd = { 0 };
    g_autofree uint8_t *buf = NULL;
    size_t s;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_new0(uint8_t, cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    CHECK(s == cs.size, cmd);

    rutabaga_cmd.ctx_id = cs.hdr.ctx_id;
    rutabaga_cmd.cmd = buf;
    rutabaga_cmd.cmd_size = cs.size;

    result = rutabaga_submit_command(vr->rutabaga, &rutabaga_cmd);
    CHECK(!result, cmd);
}
static void
rutabaga_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_transfer transfer = { 0 };
    struct virtio_gpu_transfer_to_host_2d t2d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    transfer.x = t2d.r.x;
    transfer.y = t2d.r.y;
    transfer.z = 0;
    transfer.w = t2d.r.width;
    transfer.h = t2d.r.height;
    transfer.d = 1;

    result = rutabaga_resource_transfer_write(vr->rutabaga, 0, t2d.resource_id,
                                              &transfer);
    CHECK(!result, cmd);
}
static void
rutabaga_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_transfer transfer = { 0 };
    struct virtio_gpu_transfer_host_3d t3d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    transfer.x = t3d.box.x;
    transfer.y = t3d.box.y;
    transfer.z = t3d.box.z;
    transfer.w = t3d.box.w;
    transfer.h = t3d.box.h;
    transfer.d = t3d.box.d;
    transfer.level = t3d.level;
    transfer.stride = t3d.stride;
    transfer.layer_stride = t3d.layer_stride;
    transfer.offset = t3d.offset;

    result = rutabaga_resource_transfer_write(vr->rutabaga, t3d.hdr.ctx_id,
                                              t3d.resource_id, &transfer);
    CHECK(!result, cmd);
}
static void
rutabaga_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct rutabaga_transfer transfer = { 0 };
    struct virtio_gpu_transfer_host_3d t3d;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(t3d.resource_id);

    transfer.x = t3d.box.x;
    transfer.y = t3d.box.y;
    transfer.z = t3d.box.z;
    transfer.w = t3d.box.w;
    transfer.h = t3d.box.h;
    transfer.d = t3d.box.d;
    transfer.level = t3d.level;
    transfer.stride = t3d.stride;
    transfer.layer_stride = t3d.layer_stride;
    transfer.offset = t3d.offset;

    result = rutabaga_resource_transfer_read(vr->rutabaga, t3d.hdr.ctx_id,
                                             t3d.resource_id, &transfer, NULL);
    CHECK(!result, cmd);
}
static void
rutabaga_cmd_attach_backing(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    struct rutabaga_iovecs vecs = { 0 };
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing att_rb;
    int ret;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    res = virtio_gpu_find_resource(g, att_rb.resource_id);
    CHECK(res, cmd);
    CHECK(!res->iov, cmd);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res->iov, &res->iov_cnt);
    CHECK(!ret, cmd);

    vecs.iovecs = res->iov;
    vecs.num_iovecs = res->iov_cnt;

    ret = rutabaga_resource_attach_backing(vr->rutabaga, att_rb.resource_id,
                                           &vecs);
    if (ret != 0) {
        virtio_gpu_cleanup_mapping(g, res);
    }

    CHECK(!ret, cmd);
}
static void
rutabaga_cmd_detach_backing(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach_rb;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    res = virtio_gpu_find_resource(g, detach_rb.resource_id);
    CHECK(res, cmd);

    rutabaga_resource_detach_backing(vr->rutabaga,
                                     detach_rb.resource_id);

    virtio_gpu_cleanup_mapping(g, res);
}
static void
rutabaga_cmd_ctx_attach_resource(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_resource att_res;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    result = rutabaga_context_attach_resource(vr->rutabaga, att_res.hdr.ctx_id,
                                              att_res.resource_id);
    CHECK(!result, cmd);
}
static void
rutabaga_cmd_ctx_detach_resource(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_ctx_resource det_res;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    result = rutabaga_context_detach_resource(vr->rutabaga, det_res.hdr.ctx_id,
                                              det_res.resource_id);
    CHECK(!result, cmd);
}
static void
rutabaga_cmd_get_capset_info(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(info);

    result = rutabaga_get_capset_info(vr->rutabaga, info.capset_index,
                                      &resp.capset_id, &resp.capset_max_version,
                                      &resp.capset_max_size);
    CHECK(!result, cmd);

    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
static void
rutabaga_cmd_get_capset(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t capset_size, capset_version;
    uint32_t current_id, i;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(gc);
    for (i = 0; i < vr->num_capsets; i++) {
        result = rutabaga_get_capset_info(vr->rutabaga, i,
                                          &current_id, &capset_version,
                                          &capset_size);
        CHECK(!result, cmd);

        if (current_id == gc.capset_id) {
            break;
        }
    }

    CHECK(i < vr->num_capsets, cmd);

    resp = g_malloc0(sizeof(*resp) + capset_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    rutabaga_get_capset(vr->rutabaga, gc.capset_id, gc.capset_version,
                        resp->capset_data, capset_size);

    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + capset_size);
    g_free(resp);
}
static void
rutabaga_cmd_resource_create_blob(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    int result;
    struct rutabaga_iovecs vecs = { 0 };
    g_autofree struct virtio_gpu_simple_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct rutabaga_create_blob rc_blob = { 0 };

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    CHECK(cblob.resource_id != 0, cmd);

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        result = virtio_gpu_create_mapping_iov(g, cblob.nr_entries,
                                               sizeof(cblob), cmd, &res->addrs,
                                               &res->iov, &res->iov_cnt);
        CHECK(!result, cmd);
    }

    rc_blob.blob_id = cblob.blob_id;
    rc_blob.blob_mem = cblob.blob_mem;
    rc_blob.blob_flags = cblob.blob_flags;
    rc_blob.size = cblob.size;

    vecs.iovecs = res->iov;
    vecs.num_iovecs = res->iov_cnt;

    result = rutabaga_resource_create_blob(vr->rutabaga, cblob.hdr.ctx_id,
                                           cblob.resource_id, &rc_blob, &vecs,
                                           NULL);

    if (result && cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        virtio_gpu_cleanup_mapping(g, res);
    }

    CHECK(!result, cmd);

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    res = NULL;
}
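/*
 * Blob mappings are exposed to the guest by wrapping the host pointer
 * returned by rutabaga in a MemoryRegion placed inside the device's hostmem
 * region; MAX_SLOTS bounds the number of simultaneous mappings.
 */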
static void
rutabaga_cmd_resource_map_blob(VirtIOGPU *g,
                               struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    uint32_t map_info = 0;
    uint32_t slot = 0;
    struct virtio_gpu_simple_resource *res;
    struct rutabaga_mapping mapping = { 0 };
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_resp_map_info resp = { 0 };

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(mblob);

    CHECK(mblob.resource_id != 0, cmd);

    res = virtio_gpu_find_resource(g, mblob.resource_id);
    CHECK(res, cmd);

    result = rutabaga_resource_map_info(vr->rutabaga, mblob.resource_id,
                                        &map_info);
    CHECK(!result, cmd);

    /*
     * RUTABAGA_MAP_ACCESS_* flags are not part of the virtio-gpu spec, but do
     * exist to potentially allow the hypervisor to restrict write access to
     * memory. QEMU does not need to use this functionality at the moment.
     */
    resp.map_info = map_info & RUTABAGA_MAP_CACHE_MASK;

    result = rutabaga_resource_map(vr->rutabaga, mblob.resource_id, &mapping);
    CHECK(!result, cmd);

    /*
     * There is small risk of the MemoryRegion dereferencing the pointer after
     * rutabaga unmaps it. Please see discussion here:
     *
     * https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg05141.html
     *
     * It is highly unlikely to happen in practice and doesn't affect known
     * use cases. However, it should be fixed and is noted here for posterity.
     */
    for (slot = 0; slot < MAX_SLOTS; slot++) {
        if (vr->memory_regions[slot].used) {
            continue;
        }

        MemoryRegion *mr = &(vr->memory_regions[slot].mr);
        memory_region_init_ram_ptr(mr, OBJECT(vr), "blob", mapping.size,
                                   mapping.ptr);
        memory_region_add_subregion(&vb->hostmem, mblob.offset, mr);
        vr->memory_regions[slot].resource_id = mblob.resource_id;
        vr->memory_regions[slot].used = 1;
        break;
    }

    if (slot >= MAX_SLOTS) {
        result = rutabaga_resource_unmap(vr->rutabaga, mblob.resource_id);
        CHECK(!result, cmd);
    }

    CHECK(slot < MAX_SLOTS, cmd);

    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
static void
rutabaga_cmd_resource_unmap_blob(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    uint32_t slot = 0;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unmap_blob ublob;

    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(ublob);

    CHECK(ublob.resource_id != 0, cmd);

    res = virtio_gpu_find_resource(g, ublob.resource_id);
    CHECK(res, cmd);

    for (slot = 0; slot < MAX_SLOTS; slot++) {
        if (vr->memory_regions[slot].resource_id != ublob.resource_id) {
            continue;
        }

        MemoryRegion *mr = &(vr->memory_regions[slot].mr);
        memory_region_del_subregion(&vb->hostmem, mr);

        vr->memory_regions[slot].resource_id = 0;
        vr->memory_regions[slot].used = 0;
        break;
    }

    CHECK(slot < MAX_SLOTS, cmd);
    result = rutabaga_resource_unmap(vr->rutabaga, res->resource_id);
    CHECK(!result, cmd);
}
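/*
 * Top-level control-queue dispatcher: route each virtio-gpu command to its
 * rutabaga handler, then either respond immediately or create a rutabaga
 * fence and defer completion to the fence callback.
 */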
static void
virtio_gpu_rutabaga_process_cmd(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct rutabaga_fence fence = { 0 };
    int32_t result;

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        rutabaga_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        rutabaga_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        rutabaga_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        rutabaga_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        rutabaga_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        rutabaga_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        rutabaga_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        rutabaga_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        rutabaga_cmd_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        rutabaga_cmd_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        rutabaga_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        rutabaga_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        rutabaga_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        rutabaga_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        rutabaga_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        rutabaga_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        rutabaga_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        rutabaga_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        rutabaga_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        rutabaga_cmd_resource_unmap_blob(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        error_report("%s: ctrl 0x%x, error 0x%x", __func__,
                     cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    fence.flags = cmd->cmd_hdr.flags;
    fence.ctx_id = cmd->cmd_hdr.ctx_id;
    fence.fence_id = cmd->cmd_hdr.fence_id;
    fence.ring_idx = cmd->cmd_hdr.ring_idx;

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);

    result = rutabaga_create_fence(vr->rutabaga, &fence);
    CHECK(!result, cmd);
}
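/*
 * Bottom half scheduled by the fence callback: runs in the main AIO context
 * and retires every queued command whose fence has now completed.
 */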
static void
virtio_gpu_rutabaga_aio_cb(void *opaque)
{
    struct rutabaga_aio_data *data = opaque;
    VirtIOGPU *g = VIRTIO_GPU(data->vr);
    struct rutabaga_fence fence_data = data->fence;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    uint32_t signaled_ctx_specific = fence_data.flags &
                                     RUTABAGA_FLAG_INFO_RING_IDX;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * Due to context-specific timelines, only compare fences that live
         * on the same timeline (and ring) as the signaled fence.
         */
        uint32_t target_ctx_specific = cmd->cmd_hdr.flags &
                                       RUTABAGA_FLAG_INFO_RING_IDX;

        if (signaled_ctx_specific != target_ctx_specific) {
            continue;
        }

        if (signaled_ctx_specific &&
            (cmd->cmd_hdr.ring_idx != fence_data.ring_idx)) {
            continue;
        }

        if (cmd->cmd_hdr.fence_id > fence_data.fence_id) {
            continue;
        }

        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
    }

    g_free(data);
}
static void
virtio_gpu_rutabaga_fence_cb(uint64_t user_data,
                             const struct rutabaga_fence *fence)
{
    struct rutabaga_aio_data *data;
    VirtIOGPU *g = (VirtIOGPU *)user_data;
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    /*
     * Both gfxstream and cross-domain (and even newer versions of
     * virglrenderer: see VIRGL_RENDERER_ASYNC_FENCE_CB) like to signal fence
     * completion on threads ("callback threads") that are different from the
     * thread that processes the command queue ("main thread").
     *
     * crosvm and other virtio-gpu 1.1 implementations enable callback threads
     * via locking. However, on QEMU a deadlock is observed if
     * virtio_gpu_ctrl_response_nodata(..) [used in the fence callback] is used
     * from a thread that is not the main thread.
     *
     * The reason is that QEMU's internal locking is designed to work with QEMU
     * threads (see rcu_register_thread()) and not generic C/C++/Rust threads.
     * For now, we can work around this by scheduling the return of the
     * fence descriptors on the main thread.
     */

    data = g_new0(struct rutabaga_aio_data, 1);
    data->vr = vr;
    data->fence = *fence;
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            virtio_gpu_rutabaga_aio_cb,
                            data);
}
static void
virtio_gpu_rutabaga_debug_cb(uint64_t user_data,
                             const struct rutabaga_debug *debug)
{
    switch (debug->debug_type) {
    case RUTABAGA_DEBUG_ERROR:
        error_report("%s", debug->message);
        break;
    case RUTABAGA_DEBUG_WARN:
        warn_report("%s", debug->message);
        break;
    case RUTABAGA_DEBUG_INFO:
        info_report("%s", debug->message);
        break;
    default:
        error_report("unknown debug type: %u", debug->debug_type);
    }
}
static bool virtio_gpu_rutabaga_init(VirtIOGPU *g, Error **errp)
{
    int result;
    struct rutabaga_builder builder = { 0 };
    struct rutabaga_channel channel = { 0 };
    struct rutabaga_channels channels = { 0 };

    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
    vr->rutabaga = NULL;

    builder.wsi = RUTABAGA_WSI_SURFACELESS;
    /*
     * Currently, if WSI is specified, the only valid strings are "surfaceless"
     * or "headless". Surfaceless doesn't create a native window surface, but
     * does copy from the render target to the Pixman buffer if a virtio-gpu
     * 2D hypercall is issued. Surfaceless is the default.
     *
     * Headless is like surfaceless, but doesn't copy to the Pixman buffer. The
     * use case is automated testing environments where there is no need to
     * view results.
     *
     * In the future, more performant virtio-gpu 2D UI integration may be added.
     */
    if (vr->wsi) {
        if (g_str_equal(vr->wsi, "surfaceless")) {
            vr->headless = false;
        } else if (g_str_equal(vr->wsi, "headless")) {
            vr->headless = true;
        } else {
            error_setg(errp, "invalid wsi option selected");
            return false;
        }
    }

    builder.fence_cb = virtio_gpu_rutabaga_fence_cb;
    builder.debug_cb = virtio_gpu_rutabaga_debug_cb;
    builder.capset_mask = vr->capset_mask;
    builder.user_data = (uint64_t)g;

    /*
     * If the user doesn't specify the wayland socket path, we try to infer
     * the socket via a process similar to the one used by libwayland.
     * libwayland does the following:
     *
     * 1) If $WAYLAND_DISPLAY is set, attempt to connect to
     *    $XDG_RUNTIME_DIR/$WAYLAND_DISPLAY
     * 2) Otherwise, attempt to connect to $XDG_RUNTIME_DIR/wayland-0
     * 3) Otherwise, don't pass a wayland socket to rutabaga. If a guest
     *    wayland proxy is launched, it will fail to work.
     */
    channel.channel_type = RUTABAGA_CHANNEL_TYPE_WAYLAND;
    g_autofree gchar *path = NULL;
    if (!vr->wayland_socket_path) {
        const gchar *runtime_dir = g_get_user_runtime_dir();
        const gchar *display = g_getenv("WAYLAND_DISPLAY");
        if (!display) {
            display = "wayland-0";
        }

        if (runtime_dir) {
            path = g_build_filename(runtime_dir, display, NULL);
            channel.channel_name = path;
        }
    } else {
        channel.channel_name = vr->wayland_socket_path;
    }

    if ((builder.capset_mask & (1 << RUTABAGA_CAPSET_CROSS_DOMAIN))) {
        if (channel.channel_name) {
            channels.channels = &channel;
            channels.num_channels = 1;
            builder.channels = &channels;
        }
    }

    result = rutabaga_init(&builder, &vr->rutabaga);
    if (result) {
        error_setg_errno(errp, -result, "Failed to init rutabaga");
        return false;
    }

    return true;
}
static int virtio_gpu_rutabaga_get_num_capsets(VirtIOGPU *g)
{
    int result;
    uint32_t num_capsets;
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    result = rutabaga_get_num_capsets(vr->rutabaga, &num_capsets);
    if (result) {
        error_report("Failed to get capsets");
        return 0;
    }
    vr->num_capsets = num_capsets;
    return num_capsets;
}
static void virtio_gpu_rutabaga_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_rutabaga_realize(DeviceState *qdev, Error **errp)
{
    int num_capsets;
    VirtIOGPUBase *bdev = VIRTIO_GPU_BASE(qdev);
    VirtIOGPU *gpudev = VIRTIO_GPU(qdev);

#if HOST_BIG_ENDIAN
    error_setg(errp, "rutabaga is not supported on bigendian platforms");
    return;
#endif

    if (!virtio_gpu_rutabaga_init(gpudev, errp)) {
        return;
    }

    num_capsets = virtio_gpu_rutabaga_get_num_capsets(gpudev);
    if (!num_capsets) {
        return;
    }

    bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED);
    bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED);
    bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED);

    bdev->virtio_config.num_capsets = num_capsets;
    virtio_gpu_device_realize(qdev, errp);
}
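/*
 * Device properties. Example invocation (illustrative only; the concrete
 * device name comes from the PCI/VGA wrapper that instantiates
 * TYPE_VIRTIO_GPU_RUTABAGA elsewhere in the tree):
 *
 *   -device <rutabaga virtio-gpu variant>,cross-domain=on,wsi=headless,\
 *           wayland-socket-path=/run/user/1000/wayland-0
 */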
static Property virtio_gpu_rutabaga_properties[] = {
    DEFINE_PROP_BIT64("gfxstream-vulkan", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_VULKAN, false),
    DEFINE_PROP_BIT64("cross-domain", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_CROSS_DOMAIN, false),
    DEFINE_PROP_BIT64("x-gfxstream-gles", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_GLES, false),
    DEFINE_PROP_BIT64("x-gfxstream-composer", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_COMPOSER, false),
    DEFINE_PROP_STRING("wayland-socket-path", VirtIOGPURutabaga,
                       wayland_socket_path),
    DEFINE_PROP_STRING("wsi", VirtIOGPURutabaga, wsi),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vbc = VIRTIO_GPU_BASE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);

    vbc->gl_flushed = virtio_gpu_rutabaga_gl_flushed;
    vgc->handle_ctrl = virtio_gpu_rutabaga_handle_ctrl;
    vgc->process_cmd = virtio_gpu_rutabaga_process_cmd;
    vgc->update_cursor_data = virtio_gpu_rutabaga_update_cursor;

    vdc->realize = virtio_gpu_rutabaga_realize;
    device_class_set_props(dc, virtio_gpu_rutabaga_properties);
}
static const TypeInfo virtio_gpu_rutabaga_info[] = {
    {
        .name = TYPE_VIRTIO_GPU_RUTABAGA,
        .parent = TYPE_VIRTIO_GPU,
        .instance_size = sizeof(VirtIOGPURutabaga),
        .class_init = virtio_gpu_rutabaga_class_init,
    },
};

DEFINE_TYPES(virtio_gpu_rutabaga_info)

module_obj(TYPE_VIRTIO_GPU_RUTABAGA);
module_kconfig(VIRTIO_GPU);
module_dep("hw-display-virtio-gpu");