virtio-gpu: move update_cursor_data
[qemu/ar7.git] / hw/display/virtio-gpu.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

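/*
 * Dispatch helper: call the virglrenderer variant of an operation when the
 * virgl renderer is in use, otherwise fall back to the simple 2D variant.
 */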
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

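/*
 * Copy the cursor pixel data out of the backing resource into the scanout's
 * current cursor image, provided the resource matches the cursor dimensions.
 */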
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

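/*
 * Handle a cursor-queue request: on an update, (re)define the cursor image
 * and hotspot; on a move, only the position changes.  In both cases the
 * pointer position is pushed to the console.
 */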
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail if it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

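/*
 * Detach the currently bound resource from a scanout and drop the scanout's
 * display surface and geometry.
 */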
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

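/*
 * Copy data from the guest backing store (iovec) into the host pixman image.
 * Partial rectangles are copied line by line; a transfer covering the full
 * image width at offset 0 is done in a single pass.
 */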
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

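/*
 * Flush a rectangle of a resource to every scanout it is bound to, clipping
 * the flush region against each scanout before updating the console.
 */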
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

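/*
 * Bind a resource to a scanout; recreate the display surface when the
 * backing pixels, offset or size changed, then update the scanout and
 * resource bookkeeping.
 */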
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

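/*
 * Translate the guest-supplied memory entries that follow the attach_backing
 * header into host iovecs via the device's DMA address space, optionally
 * recording the guest addresses in *addr.  Returns 0 on success, -1 on error.
 */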
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < ab->nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                 a, &len, DMA_DIRECTION_TO_DEVICE);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " resource %d element %d\n",
                              __func__, ab->resource_id, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16));
                if (addr) {
                    *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16));
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs,
                                        &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

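/*
 * Process queued control commands until the queue is empty or the renderer
 * is blocked; commands that are not yet finished are moved to the fence
 * queue and counted as inflight.
 */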
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

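/*
 * Migration save: serialize each 2D resource (metadata, backing entry layout
 * and pixel contents), terminated by a zero resource id, followed by the
 * scanout state.
 */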
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

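/*
 * Migration load: recreate each saved resource, re-read its pixel data,
 * re-map its guest backing memory, then restore and re-apply the scanout
 * configuration.
 */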
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)