[qemu.git] / hw / 9pfs / xen-9p-backend.c
/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 */
#include "qemu/osdep.h"

#include "hw/hw.h"
#include "hw/9pfs/9p.h"
#include "hw/xen/xen_backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "fsdev/qemu-fsdev.h"
#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 8
typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;

    /* local copies, so that we can read/write PDU data directly from
     * the ring */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;
typedef struct Xen9pfsDev {
    struct XenDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;
static void xen_9pfs_disconnect(struct XenDevice *xendev);
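
/*
 * Compute an iovec covering the free space currently available in the
 * "in" (backend-to-frontend) ring.  The ring is a circular buffer, so
 * the writable region is either one contiguous chunk or two chunks
 * that wrap around the end of the buffer.
 */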
static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}
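
/*
 * Compute an iovec covering the request bytes pending in the "out"
 * (frontend-to-backend) ring; as above, the data is either contiguous
 * or split in two by the ring wraparound.
 */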
static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}
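
/*
 * Marshal a reply into the "in" ring; on encoding failure the device
 * is moved to XenbusStateClosing and disconnected.
 */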
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));

    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to encode VirtFS request type %d\n", pdu->id + 1);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
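
/*
 * Unmarshal request arguments from the "out" ring; decoding failures
 * are handled the same way as encoding failures above.
 */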
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);

    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to decode VirtFS request type %d\n", pdu->id);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
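
/*
 * Build the scatter-gather list that the 9p core uses to read the
 * request payload directly out of the shared ring.
 */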
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}
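
/*
 * Same as above for the reply direction, with an additional check that
 * the free space in the ring is large enough for the reply.
 */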
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);

    buf_size = iov_size(ring->sg, num);
    if (buf_size < size) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
                      "needs %zu bytes, buffer has %zu\n", pdu->id, size,
                      buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }

    *piov = ring->sg;
    *pniov = num;
}
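
/*
 * Publish the completed reply: advance the out consumer index past the
 * request we just handled, advance the in producer index past the
 * reply, notify the frontend via its event channel, and reschedule the
 * bottom half to pick up any request that arrived in the meantime.
 */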
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    qemu_bh_schedule(ring->bh);
}
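
/*
 * Transport callbacks through which the generic 9p server reads
 * requests from, and writes replies to, the Xen shared rings.
 */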
static const V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};
static int xen_9pfs_init(struct XenDevice *xendev)
{
    return 0;
}
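
/*
 * Pull the next request off the "out" ring, if a complete one is
 * available, and hand it to the 9p core.  Only one request per ring is
 * handled at a time.
 */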
static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}
static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    xen_9pfs_receive(ring);
}
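
/*
 * Event channel fired: acknowledge and unmask the pending event, then
 * defer the actual ring processing to the bottom half.
 */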
static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}
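
/*
 * Tear down the per-ring event channel handlers; the grant mappings
 * themselves are released later by xen_9pfs_free().
 */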
static void xen_9pfs_disconnect(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].evtchndev != NULL) {
            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                                NULL, NULL, NULL);
            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                             xen_9pdev->rings[i].local_port);
            xen_9pdev->rings[i].evtchndev = NULL;
        }
    }
}
static int xen_9pfs_free(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                            xen_9pdev->rings[i].data,
                            (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            /* the interface page is a single grant */
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                            xen_9pdev->rings[i].intf,
                            1);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}
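
/*
 * Frontend is ready: read the ring configuration from xenstore, map
 * the shared rings and bind their event channels, then create the
 * fsdev backend and realize the 9p device.
 */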
static int xen_9pfs_connect(struct XenDevice *xendev)
{
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }

    xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing));
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);

        xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
                xen_9pdev->xendev.gnttabdev,
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
                xen_9pdev->xendev.gnttabdev,
                (1 << ring_order),
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].intf->ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);

        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                            (xen_9pdev->rings[i].evtchndev,
                                             xendev->dom,
                                             xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n",
                      xen_9pdev->rings[i].local_port);
        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                            xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }

    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
                             s->fsconf.tag,
                             1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev);
    v9fs_device_realize_common(s, &xen_9p_transport, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}
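
/*
 * Advertise the supported protocol versions and ring limits to the
 * frontend via xenstore before the connection is established.
 */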
static void xen_9pfs_alloc(struct XenDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}
struct XenDevOps xen_9pfs_ops = {
    .size       = sizeof(Xen9pfsDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = xen_9pfs_alloc,
    .init       = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free       = xen_9pfs_free,
};