/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "qapi/error.h"
#ifdef CONFIG_VIRGL

#include <virglrenderer.h>

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;
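
/*
 * Note: each virgl_cmd_* helper below decodes one control-queue request
 * with VIRTIO_GPU_FILL_CMD() and forwards it more or less directly to
 * the matching virglrenderer entry point.
 */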
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;      /* 2D texture target */
    args.format = c2d.format;
    args.bind = (1 << 1); /* render target */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_unref(unref.resource_id);
}
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}
static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
}
static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}
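
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: connect a resource to a display scanout
 * (a zero resource_id or empty rect disables it).  The resource's GL
 * texture is handed to the QEMU console via dpy_gl_scanout().
 */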
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        qemu_console_resize(g->scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, info.tex_id,
                       info.flags & 1 /* FIXME: Y_0_TOP */,
                       info.width, info.height,
                       ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, 0, false,
                       0, 0, 0, 0, 0, 0);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
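
/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: copy the guest's command stream out of the
 * request's scatter-gather list and pass it to virglrenderer.  Note
 * that virgl_renderer_submit_cmd() takes the size in dwords, hence
 * cs.size / 4.
 */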
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
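
/*
 * Transfers between guest backing storage and host resources all use
 * virglrenderer's transfer iov path.  The 2D variant below synthesizes
 * a flat box (z = 0, depth 1) and passes zero for ctx/level/strides.
 */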
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}
static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}
static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, NULL, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(res_iovs, att_rb.nr_entries);
    }
}
static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
}
static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}
static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
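
/*
 * Control-queue dispatcher for the virgl backend.  Unfenced commands
 * are answered with OK_NODATA right away; fenced commands get a
 * virglrenderer fence and are answered later from virgl_write_fence().
 */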
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    cmd->waiting = g->renderer_blocked;
    if (cmd->waiting) {
        return;
    }

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
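
/*
 * Callback from virglrenderer once the GL work up to 'fence' has
 * completed; retires every queued command covered by that fence.
 */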
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}
static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
}
static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
}
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};
static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
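
/*
 * Fence completion is detected by polling: virgl_renderer_poll() runs
 * pending fence callbacks, and the timer re-arms every 10 ms for as
 * long as commands or fences remain outstanding.
 */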
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}
void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
        dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0, 0, 0);
    }
}
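
/*
 * One-time virglrenderer setup: register the QEMU GL callbacks and
 * create the fence-poll (and, with stats enabled, stats) timers.
 */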
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}
#endif /* CONFIG_VIRGL */