/* virtio-gpu: check if the resource already exists in virtio_gpu_load() */
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1
static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif
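
/*
 * Copy cursor pixel data for a cursor update out of a simple (2d) resource.
 * The update is silently ignored if the resource is missing or its size
 * does not match the current cursor.
 */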
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}
#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif
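
/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR:
 * a full update (re)defines the cursor image and hotspot, a move only
 * repositions it.
 */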
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
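
/*
 * Send a response for @cmd back to the guest, propagating the fence id
 * and context id from the request header when VIRTIO_GPU_FLAG_FENCE is set.
 */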
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skipping the integer overflow
     * check; pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
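
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for the
 * guest resource, subject to the "max_hostmem" accounting limit.
 */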
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
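
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest's
 * attached backing pages into the host pixman image, row by row for
 * partial transfers or in one pass for full-width ones.
 */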
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
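
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: point a scanout (display head) at a rectangle
 * of a resource, creating a new display surface backed by the resource's
 * pixman image whenever the current surface doesn't already match.
 */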
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
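
/*
 * Read the guest's backing-store entries for RESOURCE_ATTACH_BACKING and
 * map each guest page range into host memory, building an iovec array
 * (and, when @addr is non-NULL, recording the guest addresses, which
 * virtio_gpu_save() needs for migration).
 */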
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
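
/*
 * Drain the queued control commands, stopping early if the renderer is
 * blocked; commands that have not finished synchronously are parked on
 * fenceq until their fence completes.
 */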
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}
static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);

#ifdef CONFIG_VIRGL
    if (g->renderer_reset) {
        g->renderer_reset = false;
        virtio_gpu_virgl_reset(g);
    }
#endif
    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
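
/*
 * Save the resource list (metadata, backing entries and pixel data),
 * terminated by a zero resource id, followed by the scanout state.
 */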
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
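
/*
 * Counterpart to virtio_gpu_save(): recreate each resource, remap its guest
 * backing pages and reapply the scanout state.  A resource id that is
 * already present in reslist makes the stream invalid (-EINVAL), so a
 * corrupt migration stream cannot register the same resource twice.
 */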
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;

#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}
/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = virtio_gpu_gl_unblock;
    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    dc->props = virtio_gpu_properties;
}
static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)