virtio-gpu: Refactor virtio_gpu_set_scanout
[qemu/ar7.git] / hw/display/virtio-gpu.c
blob fdcedfc61e54d793b23215a35e0de3dada470dd6
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1
static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
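
/*
 * Cursor handling: the guest renders its cursor image into a regular 2D
 * resource and references it from UPDATE_CURSOR commands.  The helper
 * below copies that resource's pixels into the scanout's current QEMU
 * cursor, skipping the copy if the dimensions do not match; note that
 * update_cursor() further down always allocates a 64x64 cursor.
 */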
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || !res->image) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
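
/*
 * Completes a control command.  If the request carried the FENCE flag,
 * the fence id and context id are echoed back in the response header so
 * the guest can synchronize against command completion.
 */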
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
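
/*
 * Rough host-memory accounting for a resource.  As an illustration (not
 * from the original source): a 1024x768 PIXMAN_x8r8g8b8 image has
 * bpp = 32, stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes, so
 * hostmem = 768 * 4096 = 3 MiB, which is charged against the
 * "max_hostmem" property before the pixman allocation is attempted.
 */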
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
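
/*
 * Turning a scanout off: drop this scanout's bit in the backing
 * resource's bitmask and hand the console a NULL surface, which lets
 * the display core install its placeholder surface.
 */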
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
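
/*
 * TRANSFER_TO_HOST_2D copies pixel data from the guest-supplied backing
 * iovec into the host-side pixman image.  A whole-image transfer
 * (offset 0, origin at 0,0, full width) is done with a single
 * iov_to_buf(); partial rectangles fall back to a line-by-line copy
 * that honors the image stride.
 */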
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
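
/*
 * The scanout setup below is split out of virtio_gpu_set_scanout so it
 * operates on a generic virtio_gpu_framebuffer description (format,
 * stride, offset) rather than on the SET_SCANOUT command directly; that
 * split is the refactoring this commit introduces.  The destroy
 * callback keeps the backing resource's pixman image alive for as long
 * as the derived display surface references its bits.
 */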
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    if (scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, scanout_id);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;
    data = (uint8_t *)pixman_image_get_data(res->image);

    /* create a surface for this scanout */
    if (!scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
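
/*
 * SET_SCANOUT maps a rectangle of an existing 2D resource onto a
 * display.  The framebuffer offset is computed from the rectangle's
 * top-left corner; for example (illustrative values, not from the
 * original source), with r.x = 100, r.y = 50, 4 bytes per pixel and a
 * 4096-byte stride: offset = 100 * 4 + 50 * 4096 = 205200 bytes.
 */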
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
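
/*
 * Translates the guest's list of (guest physical address, length)
 * entries into a host iovec via dma_memory_map().  A single guest entry
 * may map to several host chunks, so the iovec is grown in blocks of 16
 * as needed; on any mapping failure everything mapped so far is unwound
 * and -1 is returned.
 */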
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < ab->nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                 a, &len, DMA_DIRECTION_TO_DEVICE);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " resource %d element %d\n",
                              __func__, ab->resource_id, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16));
                if (addr) {
                    *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16));
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs,
                                        &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
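
/*
 * Central dispatcher for the 2D command set.  Any command that did not
 * mark itself finished (i.e. did not already queue a response) gets a
 * NODATA response carrying either the accumulated error or OK.
 */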
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
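
/*
 * Drains the software command queue.  The processing_cmdq flag guards
 * against reentrancy, and a blocked renderer pauses the drain.
 * Commands that have not finished yet (fenced work still in flight)
 * are parked on fenceq and counted in g->inflight.
 */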
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
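
/*
 * Migration: per-scanout state is described declaratively below, while
 * resources are streamed by hand in virtio_gpu_save()/virtio_gpu_load()
 * (see the comment above vmstate_virtio_gpu further down).
 */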
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
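
/*
 * Counterpart to virtio_gpu_save(): the stream is a sequence of
 * resource records (id, geometry, format, iov table, raw pixel data)
 * terminated by a zero resource id, followed by the scanout vmstate.
 * Backing mappings are re-established with dma_memory_map(), and any
 * failure rolls back the partially restored resource.
 */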
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}
/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core. Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)