block/export: only acquire AioContext once for vhost_user_server_stop()
[qemu/kevin.git] / hw / xen / xen-operations.c
blob4b78fbf4bdf89a34a180891f700abfbfa0426aac
1 /*
2 * QEMU Xen backend support: Operations for true Xen
4 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6 * Authors: David Woodhouse <dwmw2@infradead.org>
8 * This work is licensed under the terms of the GNU GPL, version 2 or later.
9 * See the COPYING file in the top-level directory.
12 #include "qemu/osdep.h"
13 #include "qemu/uuid.h"
14 #include "qapi/error.h"
16 #include "hw/xen/xen_native.h"
17 #include "hw/xen/xen_backend_ops.h"
20 * If we have new enough libxenctrl then we do not want/need these compat
21 * interfaces, despite what the user supplied cflags might say. They
22 * must be undefined before including xenctrl.h
24 #undef XC_WANT_COMPAT_EVTCHN_API
25 #undef XC_WANT_COMPAT_GNTTAB_API
26 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
28 #include <xenctrl.h>
31 * We don't support Xen prior to 4.2.0.
34 /* Xen 4.2 through 4.6 */
35 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
37 typedef xc_evtchn xenevtchn_handle;
38 typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
40 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
41 #define xenevtchn_close(h) xc_evtchn_close(h)
42 #define xenevtchn_fd(h) xc_evtchn_fd(h)
43 #define xenevtchn_pending(h) xc_evtchn_pending(h)
44 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
45 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
46 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
47 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
49 typedef xc_gnttab xengnttab_handle;
51 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
52 #define xengnttab_close(h) xc_gnttab_close(h)
53 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
54 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
55 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
56 #define xengnttab_map_grant_refs(h, c, d, r, p) \
57 xc_gnttab_map_grant_refs(h, c, d, r, p)
58 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
59 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
61 typedef xc_interface xenforeignmemory_handle;
63 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
65 #include <xenevtchn.h>
66 #include <xengnttab.h>
67 #include <xenforeignmemory.h>
69 #endif
71 /* Xen before 4.8 */
73 static int libxengnttab_fallback_grant_copy(xengnttab_handle *xgt,
74 bool to_domain, uint32_t domid,
75 XenGrantCopySegment segs[],
76 unsigned int nr_segs, Error **errp)
78 uint32_t *refs = g_new(uint32_t, nr_segs);
79 int prot = to_domain ? PROT_WRITE : PROT_READ;
80 void *map;
81 unsigned int i;
82 int rc = 0;
84 for (i = 0; i < nr_segs; i++) {
85 XenGrantCopySegment *seg = &segs[i];
87 refs[i] = to_domain ? seg->dest.foreign.ref :
88 seg->source.foreign.ref;
90 map = xengnttab_map_domain_grant_refs(xgt, nr_segs, domid, refs, prot);
91 if (!map) {
92 if (errp) {
93 error_setg_errno(errp, errno,
94 "xengnttab_map_domain_grant_refs failed");
96 rc = -errno;
97 goto done;
100 for (i = 0; i < nr_segs; i++) {
101 XenGrantCopySegment *seg = &segs[i];
102 void *page = map + (i * XEN_PAGE_SIZE);
104 if (to_domain) {
105 memcpy(page + seg->dest.foreign.offset, seg->source.virt,
106 seg->len);
107 } else {
108 memcpy(seg->dest.virt, page + seg->source.foreign.offset,
109 seg->len);
113 if (xengnttab_unmap(xgt, map, nr_segs)) {
114 if (errp) {
115 error_setg_errno(errp, errno, "xengnttab_unmap failed");
117 rc = -errno;
120 done:
121 g_free(refs);
122 return rc;
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800

/*
 * Grant copy using the native xengnttab_grant_copy() call (Xen >= 4.8):
 * translate QEMU's segment descriptors into the library's format and issue
 * a single request.  Returns 0 on success, -errno/-EIO on failure, setting
 * @errp if non-NULL.
 */
static int libxengnttab_backend_grant_copy(xengnttab_handle *xgt,
                                           bool to_domain, uint32_t domid,
                                           XenGrantCopySegment *segs,
                                           uint32_t nr_segs, Error **errp)
{
    xengnttab_grant_copy_segment_t *copy_segs;
    unsigned int idx;
    int rc;

    copy_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs);

    for (idx = 0; idx < nr_segs; idx++) {
        xengnttab_grant_copy_segment_t *cs = &copy_segs[idx];
        XenGrantCopySegment *seg = &segs[idx];

        /* Exactly one side of the copy is a foreign grant ref. */
        if (to_domain) {
            cs->flags = GNTCOPY_dest_gref;
            cs->dest.foreign.domid = domid;
            cs->dest.foreign.ref = seg->dest.foreign.ref;
            cs->dest.foreign.offset = seg->dest.foreign.offset;
            cs->source.virt = seg->source.virt;
        } else {
            cs->flags = GNTCOPY_source_gref;
            cs->source.foreign.domid = domid;
            cs->source.foreign.ref = seg->source.foreign.ref;
            cs->source.foreign.offset = seg->source.foreign.offset;
            cs->dest.virt = seg->dest.virt;
        }
        cs->len = seg->len;
    }

    if (xengnttab_grant_copy(xgt, nr_segs, copy_segs)) {
        if (errp) {
            error_setg_errno(errp, errno, "xengnttab_grant_copy failed");
        }
        rc = -errno;
        goto done;
    }

    /* The operation can partially fail; check every per-segment status. */
    rc = 0;
    for (idx = 0; idx < nr_segs; idx++) {
        if (copy_segs[idx].status != GNTST_okay) {
            if (errp) {
                error_setg(errp, "xengnttab_grant_copy seg[%u] failed", idx);
            }
            rc = -EIO;
            break;
        }
    }

done:
    g_free(copy_segs);
    return rc;
}

#endif
187 static xenevtchn_handle *libxenevtchn_backend_open(void)
189 return xenevtchn_open(NULL, 0);
/*
 * Event-channel ops for true Xen: apart from open(), these are direct
 * libxenevtchn entry points (or the xc_evtchn compat macros on Xen < 4.7.1).
 */
struct evtchn_backend_ops libxenevtchn_backend_ops = {
    .open = libxenevtchn_backend_open,
    .close = xenevtchn_close,
    .bind_interdomain = xenevtchn_bind_interdomain,
    .unbind = xenevtchn_unbind,
    .get_fd = xenevtchn_fd,
    .notify = xenevtchn_notify,
    .unmask = xenevtchn_unmask,
    .pending = xenevtchn_pending,
};
203 static xengnttab_handle *libxengnttab_backend_open(void)
205 return xengnttab_open(NULL, 0);
208 static int libxengnttab_backend_unmap(xengnttab_handle *xgt,
209 void *start_address, uint32_t *refs,
210 uint32_t count)
212 return xengnttab_unmap(xgt, start_address, count);
/*
 * Grant-table ops for true Xen.  .grant_copy starts as the map+memcpy
 * fallback; setup_xen_backend_ops() swaps in the native implementation at
 * runtime when xengnttab_grant_copy() is available (hence not const).
 */
static struct gnttab_backend_ops libxengnttab_backend_ops = {
    .features = XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE,
    .open = libxengnttab_backend_open,
    .close = xengnttab_close,
    .grant_copy = libxengnttab_fallback_grant_copy,
    .set_max_grants = xengnttab_set_max_grants,
    .map_refs = xengnttab_map_domain_grant_refs,
    .unmap = libxengnttab_backend_unmap,
};
226 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
228 static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot,
229 size_t pages, xfn_pfn_t *pfns,
230 int *errs)
232 if (errs) {
233 return xc_map_foreign_bulk(xen_xc, dom, prot, pfns, errs, pages);
234 } else {
235 return xc_map_foreign_pages(xen_xc, dom, prot, pfns, pages);
239 static int libxenforeignmem_backend_unmap(void *addr, size_t pages)
241 return munmap(addr, pages * XC_PAGE_SIZE);
244 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
246 static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot,
247 size_t pages, xen_pfn_t *pfns,
248 int *errs)
250 return xenforeignmemory_map2(xen_fmem, dom, addr, prot, 0, pages, pfns,
251 errs);
254 static int libxenforeignmem_backend_unmap(void *addr, size_t pages)
256 return xenforeignmemory_unmap(xen_fmem, addr, pages);
259 #endif
/* Foreign-memory mapping ops for true Xen. */
struct foreignmem_backend_ops libxenforeignmem_backend_ops = {
    .map = libxenforeignmem_backend_map,
    .unmap = libxenforeignmem_backend_unmap,
};
/* One xenstore connection: the library handle plus the watch notifiers. */
struct qemu_xs_handle {
    struct xs_handle *xsh;      /* underlying libxenstore connection */
    NotifierList notifiers;     /* one entry per active qemu_xs_watch */
};
271 static void watch_event(void *opaque)
273 struct qemu_xs_handle *h = opaque;
275 for (;;) {
276 char **v = xs_check_watch(h->xsh);
278 if (!v) {
279 break;
282 notifier_list_notify(&h->notifiers, v);
283 free(v);
287 static struct qemu_xs_handle *libxenstore_open(void)
289 struct xs_handle *xsh = xs_open(0);
290 struct qemu_xs_handle *h = g_new0(struct qemu_xs_handle, 1);
292 if (!xsh) {
293 return NULL;
296 h = g_new0(struct qemu_xs_handle, 1);
297 h->xsh = xsh;
299 notifier_list_init(&h->notifiers);
300 qemu_set_fd_handler(xs_fileno(h->xsh), watch_event, NULL, h);
302 return h;
305 static void libxenstore_close(struct qemu_xs_handle *h)
307 g_assert(notifier_list_empty(&h->notifiers));
308 qemu_set_fd_handler(xs_fileno(h->xsh), NULL, NULL, NULL);
309 xs_close(h->xsh);
310 g_free(h);
313 static char *libxenstore_get_domain_path(struct qemu_xs_handle *h,
314 unsigned int domid)
316 return xs_get_domain_path(h->xsh, domid);
319 static char **libxenstore_directory(struct qemu_xs_handle *h,
320 xs_transaction_t t, const char *path,
321 unsigned int *num)
323 return xs_directory(h->xsh, t, path, num);
326 static void *libxenstore_read(struct qemu_xs_handle *h, xs_transaction_t t,
327 const char *path, unsigned int *len)
329 return xs_read(h->xsh, t, path, len);
332 static bool libxenstore_write(struct qemu_xs_handle *h, xs_transaction_t t,
333 const char *path, const void *data,
334 unsigned int len)
336 return xs_write(h->xsh, t, path, data, len);
339 static bool libxenstore_create(struct qemu_xs_handle *h, xs_transaction_t t,
340 unsigned int owner, unsigned int domid,
341 unsigned int perms, const char *path)
343 struct xs_permissions perms_list[] = {
345 .id = owner,
346 .perms = XS_PERM_NONE,
349 .id = domid,
350 .perms = perms,
354 if (!xs_mkdir(h->xsh, t, path)) {
355 return false;
358 return xs_set_permissions(h->xsh, t, path, perms_list,
359 ARRAY_SIZE(perms_list));
362 static bool libxenstore_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
363 const char *path)
365 return xs_rm(h->xsh, t, path);
/* One registered xenstore watch. */
struct qemu_xs_watch {
    char *path;         /* watched node (owned, g_strdup'd) */
    char *token;        /* unique token identifying this watch (owned) */
    xs_watch_fn fn;     /* user callback */
    void *opaque;       /* user data passed to fn */
    Notifier notifier;  /* entry in qemu_xs_handle.notifiers */
};
376 static void watch_notify(Notifier *n, void *data)
378 struct qemu_xs_watch *w = container_of(n, struct qemu_xs_watch, notifier);
379 const char **v = data;
381 if (!strcmp(w->token, v[XS_WATCH_TOKEN])) {
382 w->fn(w->opaque, v[XS_WATCH_PATH]);
386 static struct qemu_xs_watch *new_watch(const char *path, xs_watch_fn fn,
387 void *opaque)
389 struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
390 QemuUUID uuid;
392 qemu_uuid_generate(&uuid);
394 w->token = qemu_uuid_unparse_strdup(&uuid);
395 w->path = g_strdup(path);
396 w->fn = fn;
397 w->opaque = opaque;
398 w->notifier.notify = watch_notify;
400 return w;
403 static void free_watch(struct qemu_xs_watch *w)
405 g_free(w->token);
406 g_free(w->path);
408 g_free(w);
411 static struct qemu_xs_watch *libxenstore_watch(struct qemu_xs_handle *h,
412 const char *path, xs_watch_fn fn,
413 void *opaque)
415 struct qemu_xs_watch *w = new_watch(path, fn, opaque);
417 notifier_list_add(&h->notifiers, &w->notifier);
419 if (!xs_watch(h->xsh, path, w->token)) {
420 notifier_remove(&w->notifier);
421 free_watch(w);
422 return NULL;
425 return w;
428 static void libxenstore_unwatch(struct qemu_xs_handle *h,
429 struct qemu_xs_watch *w)
431 xs_unwatch(h->xsh, w->path, w->token);
432 notifier_remove(&w->notifier);
433 free_watch(w);
436 static xs_transaction_t libxenstore_transaction_start(struct qemu_xs_handle *h)
438 return xs_transaction_start(h->xsh);
441 static bool libxenstore_transaction_end(struct qemu_xs_handle *h,
442 xs_transaction_t t, bool abort)
444 return xs_transaction_end(h->xsh, t, abort);
/* Xenstore ops for true Xen, backed by libxenstore. */
struct xenstore_backend_ops libxenstore_backend_ops = {
    .open = libxenstore_open,
    .close = libxenstore_close,
    .get_domain_path = libxenstore_get_domain_path,
    .directory = libxenstore_directory,
    .read = libxenstore_read,
    .write = libxenstore_write,
    .create = libxenstore_create,
    .destroy = libxenstore_destroy,
    .watch = libxenstore_watch,
    .unwatch = libxenstore_unwatch,
    .transaction_start = libxenstore_transaction_start,
    .transaction_end = libxenstore_transaction_end,
};
462 void setup_xen_backend_ops(void)
464 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
465 xengnttab_handle *xgt = xengnttab_open(NULL, 0);
467 if (xgt) {
468 if (xengnttab_grant_copy(xgt, 0, NULL) == 0) {
469 libxengnttab_backend_ops.grant_copy = libxengnttab_backend_grant_copy;
471 xengnttab_close(xgt);
473 #endif
474 xen_evtchn_ops = &libxenevtchn_backend_ops;
475 xen_gnttab_ops = &libxengnttab_backend_ops;
476 xen_foreignmem_ops = &libxenforeignmem_backend_ops;
477 xen_xenstore_ops = &libxenstore_backend_ops;