hw/9pfs/xen-9p-backend.c

/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 */

#include "qemu/osdep.h"

#include "hw/9pfs/9p.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qapi/error.h"
#include "qemu/config-file.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "fsdev/qemu-fsdev.h"
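
/*
 * Protocol limits advertised to the frontend via xenstore in
 * xen_9pfs_alloc(): the supported protocol versions, the maximum number
 * of rings per device, and the maximum ring size as a power-of-two
 * page order.
 */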
#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 8

typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;

    /* local copies, so that we can read/write PDU data directly from
     * the ring */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;

typedef struct Xen9pfsDev {
    struct XenLegacyDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;

static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev);
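
/*
 * Compute the scatter-gather list for the free space of the "in"
 * (backend-to-frontend) ring, where replies are written. Two iovecs
 * are needed when the free area wraps around the end of the ring.
 */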
static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}
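
/*
 * Compute the scatter-gather list covering the current request on the
 * "out" (frontend-to-backend) ring: ring->out_size bytes starting at
 * the masked consumer index, split in two if the request wraps around
 * the end of the ring.
 */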
static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}
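
/*
 * Marshal a 9p reply into the "in" ring. On encoding failure, move the
 * device to XenbusStateClosing and disconnect: the reply cannot be
 * delivered.
 */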
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));

    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to encode VirtFS request type %d\n", pdu->id + 1);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
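
/*
 * Unmarshal a 9p request from the "out" ring; on decoding failure the
 * device is closed and disconnected, mirroring the marshal path above.
 */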
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);

    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to decode VirtFS request type %d\n", pdu->id);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
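
/*
 * Hand the generic 9p code an iovec view of the request payload on the
 * "out" ring. The two-element sg array is kept in the ring structure
 * and freed on the next request or in push_and_notify().
 */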
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_new0(struct iovec, 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}
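
/*
 * Hand the generic 9p code an iovec view of the reply buffer on the
 * "in" ring, clamping *size to the space actually available. A buffer
 * smaller than P9_IOHDRSZ is treated as a fatal frontend error.
 */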
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t *size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

    g_free(ring->sg);

    ring->sg = g_new0(struct iovec, 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);

    buf_size = iov_size(ring->sg, num);
    if (buf_size < P9_IOHDRSZ) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
                      "needs %zu bytes, buffer has %zu, less than minimum\n",
                      pdu->id, *size, buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    if (buf_size < *size) {
        *size = buf_size;
    }

    *piov = ring->sg;
    *pniov = num;
}
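
/*
 * Complete a request: publish the new consumer/producer indexes to the
 * shared interface page, notify the frontend through the event channel,
 * and reschedule the bottom half to pick up any queued request.
 */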
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    qemu_bh_schedule(ring->bh);
}
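
/* Transport hooks through which the generic 9p code drives this backend. */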
static const V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};

static int xen_9pfs_init(struct XenLegacyDevice *xendev)
{
    return 0;
}
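
/*
 * Pull the next request off the "out" ring, if a complete one is
 * available, and submit it to the generic 9p layer. Only one request
 * per ring is handled at a time (see ring->inprogress).
 */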
static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}
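
/*
 * Event channel interrupts only schedule the bottom half, which does
 * the actual receive work in QEMU context.
 */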
static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    xen_9pfs_receive(ring);
}

static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}
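
/*
 * Tear down the event delivery path for every ring; safe to call more
 * than once, as already-unbound rings are skipped.
 */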
static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].evtchndev != NULL) {
            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                                NULL, NULL, NULL);
            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                             xen_9pdev->rings[i].local_port);
            xen_9pdev->rings[i].evtchndev = NULL;
        }
    }
}
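
/* Disconnect if needed, then release all mappings and allocations. */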
static int xen_9pfs_free(struct XenLegacyDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].data,
                                    (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].intf,
                                    1); /* the intf area is a single grant page */
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}
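
/*
 * Backend initialisation: read the ring layout negotiated by the
 * frontend from xenstore, map the grant pages for each ring, bind the
 * event channels, and realize a 9p device backed by a local fsdev.
 */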
static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
{
    Error *err = NULL;
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }
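
    /*
     * For each ring advertised by the frontend: read the grant
     * reference and event channel from xenstore, map the interface
     * page and the data pages it lists, then set up event delivery.
     */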
    xen_9pdev->rings = g_new0(Xen9pfsRing, xen_9pdev->num_rings);
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);

        xen_9pdev->rings[i].intf =
            xen_be_map_grant_ref(&xen_9pdev->xendev,
                                 xen_9pdev->rings[i].ref,
                                 PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        xen_9pdev->rings[i].data =
            xen_be_map_grant_refs(&xen_9pdev->xendev,
                                  xen_9pdev->rings[i].intf->ref,
                                  (1 << ring_order),
                                  PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);
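
        /*
         * Request processing is deferred to a bottom half; the event
         * channel handler below only schedules it.
         */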
        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                            (xen_9pdev->rings[i].evtchndev,
                                             xendev->dom,
                                             xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                            xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }
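
    /*
     * Create a local fsdev from the backend's xenstore keys and realize
     * the generic 9p device on top of it.
     */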
    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
                             s->fsconf.tag,
                             1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev, &err);
    if (err) {
        error_report_err(err);
    }
    v9fs_device_realize_common(s, &xen_9p_transport, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}
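
/* Advertise the supported versions and limits to the frontend. */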
static void xen_9pfs_alloc(struct XenLegacyDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}
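
/* Entry points for the legacy Xen backend framework. */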
struct XenDevOps xen_9pfs_ops = {
    .size       = sizeof(Xen9pfsDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = xen_9pfs_alloc,
    .init       = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free       = xen_9pfs_free,
};