/* hw/display/virtio-gpu-virgl.c */

/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#include <virglrenderer.h>

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;
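
/*
 * Create a host-side 2D resource.  In virglrenderer's gallium enums,
 * target 2 is PIPE_TEXTURE_2D and (1 << 1) is VIRGL_BIND_RENDER_TARGET:
 * 2D resources are single-layer, single-level render targets.
 */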
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}
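
/* Create a host-side 3D resource; all texture parameters come from the guest. */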
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
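
/*
 * Drop a resource: detach any guest backing pages first so their
 * mappings can be released before the renderer reference goes away.
 */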
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}
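
/* Guest rendering contexts map 1:1 onto virglrenderer contexts. */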
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}
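
/* Flush: push the damage rectangle to every scanout showing this resource. */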
static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}
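
/*
 * Associate a resource with a scanout.  A valid resource id attaches the
 * backing texture to the display; resource id 0 disables the scanout.
 */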
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}
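
/*
 * Copy a guest command stream out of the virtqueue and hand it to
 * virglrenderer; the renderer consumes the buffer as 32-bit words.
 */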
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
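
/*
 * 2D transfers always target level 0 of a context-0 resource, so only
 * the destination box and offset come from the guest request.
 */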
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0, /* ctx_id */
                                      0, /* level */
                                      0, /* stride */
                                      0, /* layer_stride */
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
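
/*
 * Map the guest memory entries into an iovec and attach it as backing
 * store; if the renderer rejects it, the mapping is cleaned up again.
 */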
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}
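
/* Detach guest backing store and release the associated mappings. */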
static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
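
/* Two capsets are advertised: index 0 is VIRGL, index 1 is VIRGL2. */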
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
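
/* Return the capset contents; the response is sized to the renderer's maximum. */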
static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
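
/*
 * Top-level dispatcher: decode the control header, run the command, then
 * send the response (immediately, or on fence completion if requested).
 */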
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
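
/*
 * Fence callback from virglrenderer: retire every queued command whose
 * fence id is at or below the signalled fence.
 */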
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
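
/* GL context helpers wired into virglrenderer through virtio_gpu_3d_cbs. */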
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version = 1,
    .write_fence = virgl_write_fence,
    .create_gl_context = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
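
/* Poll the renderer for completed fences; re-arm the timer while work is pending. */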
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}
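
/* One-time renderer initialization; also sets up the fence-poll and stats timers. */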
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}
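
/* Advertise two capsets when the renderer supports VIRGL2, else just one. */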
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}