Update version for v2.9.0-rc0 release
[qemu/ar7.git] / include / hw / xen / xen_common.h
blobdce76ee16225ae633c748aaffa99adda5f035249
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
4 /*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include <xen/io/xenbus.h>
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "hw/xen/trace.h"
/*
 * We don't support Xen prior to 4.2.0.
 */
27 /* Xen 4.2 through 4.6 */
28 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
30 typedef xc_interface xenforeignmemory_handle;
31 typedef xc_evtchn xenevtchn_handle;
32 typedef xc_gnttab xengnttab_handle;
34 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
35 #define xenevtchn_close(h) xc_evtchn_close(h)
36 #define xenevtchn_fd(h) xc_evtchn_fd(h)
37 #define xenevtchn_pending(h) xc_evtchn_pending(h)
38 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
39 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
40 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
41 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
43 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
44 #define xengnttab_close(h) xc_gnttab_close(h)
45 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
46 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
47 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
48 #define xengnttab_map_grant_refs(h, c, d, r, p) \
49 xc_gnttab_map_grant_refs(h, c, d, r, p)
50 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
51 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
53 #define xenforeignmemory_open(l, f) xen_xc
55 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
56 int prot, size_t pages,
57 const xen_pfn_t arr[/*pages*/],
58 int err[/*pages*/])
60 if (err)
61 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
62 else
63 return xc_map_foreign_pages(h, dom, prot, arr, pages);
66 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
68 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
70 #include <xenevtchn.h>
71 #include <xengnttab.h>
72 #include <xenforeignmemory.h>
74 #endif
76 void destroy_hvm_domain(bool reboot);
78 /* shutdown/destroy current domain because of an error */
79 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
81 #ifdef HVM_PARAM_VMPORT_REGS_PFN
82 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
83 xen_pfn_t *vmport_regs_pfn)
85 int rc;
86 uint64_t value;
87 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
88 if (rc >= 0) {
89 *vmport_regs_pfn = (xen_pfn_t) value;
91 return rc;
93 #else
94 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
95 xen_pfn_t *vmport_regs_pfn)
97 return -ENOSYS;
99 #endif
101 /* Xen before 4.6 */
102 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
104 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
105 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
106 #endif
108 #endif
110 static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
111 domid_t dom,
112 xen_pfn_t *ioreq_pfn,
113 xen_pfn_t *bufioreq_pfn,
114 evtchn_port_t
115 *bufioreq_evtchn)
117 unsigned long param;
118 int rc;
120 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
121 if (rc < 0) {
122 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
123 return -1;
126 *ioreq_pfn = param;
128 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
129 if (rc < 0) {
130 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
131 return -1;
134 *bufioreq_pfn = param;
136 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
137 &param);
138 if (rc < 0) {
139 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
140 return -1;
143 *bufioreq_evtchn = param;
145 return 0;
148 /* Xen before 4.5 */
149 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
151 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
152 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
153 #endif
155 #define IOREQ_TYPE_PCI_CONFIG 2
157 typedef uint16_t ioservid_t;
159 static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
160 ioservid_t ioservid,
161 MemoryRegionSection *section)
165 static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
166 ioservid_t ioservid,
167 MemoryRegionSection *section)
171 static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
172 ioservid_t ioservid,
173 MemoryRegionSection *section)
177 static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
178 ioservid_t ioservid,
179 MemoryRegionSection *section)
183 static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
184 ioservid_t ioservid,
185 PCIDevice *pci_dev)
189 static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
190 ioservid_t ioservid,
191 PCIDevice *pci_dev)
195 static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
196 ioservid_t *ioservid)
200 static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
201 ioservid_t ioservid)
205 static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
206 ioservid_t ioservid,
207 xen_pfn_t *ioreq_pfn,
208 xen_pfn_t *bufioreq_pfn,
209 evtchn_port_t *bufioreq_evtchn)
211 return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, bufioreq_pfn,
212 bufioreq_evtchn);
215 static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
216 ioservid_t ioservid,
217 bool enable)
219 return 0;
222 /* Xen 4.5 */
223 #else
225 static bool use_default_ioreq_server;
227 static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
228 ioservid_t ioservid,
229 MemoryRegionSection *section)
231 hwaddr start_addr = section->offset_within_address_space;
232 ram_addr_t size = int128_get64(section->size);
233 hwaddr end_addr = start_addr + size - 1;
235 if (use_default_ioreq_server) {
236 return;
239 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
240 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
241 start_addr, end_addr);
244 static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
245 ioservid_t ioservid,
246 MemoryRegionSection *section)
248 hwaddr start_addr = section->offset_within_address_space;
249 ram_addr_t size = int128_get64(section->size);
250 hwaddr end_addr = start_addr + size - 1;
252 if (use_default_ioreq_server) {
253 return;
257 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
258 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
259 start_addr, end_addr);
262 static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
263 ioservid_t ioservid,
264 MemoryRegionSection *section)
266 hwaddr start_addr = section->offset_within_address_space;
267 ram_addr_t size = int128_get64(section->size);
268 hwaddr end_addr = start_addr + size - 1;
270 if (use_default_ioreq_server) {
271 return;
275 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
276 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
277 start_addr, end_addr);
280 static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
281 ioservid_t ioservid,
282 MemoryRegionSection *section)
284 hwaddr start_addr = section->offset_within_address_space;
285 ram_addr_t size = int128_get64(section->size);
286 hwaddr end_addr = start_addr + size - 1;
288 if (use_default_ioreq_server) {
289 return;
292 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
293 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
294 start_addr, end_addr);
297 static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
298 ioservid_t ioservid,
299 PCIDevice *pci_dev)
301 if (use_default_ioreq_server) {
302 return;
305 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
306 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
307 xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
308 0, pci_bus_num(pci_dev->bus),
309 PCI_SLOT(pci_dev->devfn),
310 PCI_FUNC(pci_dev->devfn));
313 static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
314 ioservid_t ioservid,
315 PCIDevice *pci_dev)
317 if (use_default_ioreq_server) {
318 return;
321 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
322 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
323 xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
324 0, pci_bus_num(pci_dev->bus),
325 PCI_SLOT(pci_dev->devfn),
326 PCI_FUNC(pci_dev->devfn));
329 static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
330 ioservid_t *ioservid)
332 int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
333 ioservid);
335 if (rc == 0) {
336 trace_xen_ioreq_server_create(*ioservid);
337 return;
340 *ioservid = 0;
341 use_default_ioreq_server = true;
342 trace_xen_default_ioreq_server();
345 static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
346 ioservid_t ioservid)
348 if (use_default_ioreq_server) {
349 return;
352 trace_xen_ioreq_server_destroy(ioservid);
353 xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
356 static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
357 ioservid_t ioservid,
358 xen_pfn_t *ioreq_pfn,
359 xen_pfn_t *bufioreq_pfn,
360 evtchn_port_t *bufioreq_evtchn)
362 if (use_default_ioreq_server) {
363 return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn,
364 bufioreq_pfn,
365 bufioreq_evtchn);
368 return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
369 ioreq_pfn, bufioreq_pfn,
370 bufioreq_evtchn);
373 static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
374 ioservid_t ioservid,
375 bool enable)
377 if (use_default_ioreq_server) {
378 return 0;
381 trace_xen_ioreq_server_state(ioservid, enable);
382 return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
385 #endif
387 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
388 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
389 unsigned int space,
390 unsigned long idx,
391 xen_pfn_t gpfn)
393 return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
395 #else
396 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
397 unsigned int space,
398 unsigned long idx,
399 xen_pfn_t gpfn)
401 /* In Xen 4.6 rc is -1 and errno contains the error value. */
402 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
403 if (rc == -1)
404 return errno;
405 return rc;
407 #endif
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/* Pre-4.7 xc_domain_create() takes no domain-configuration argument. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
/* Xen 4.7+ added a configuration parameter; pass NULL for the default. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
427 /* Xen before 4.8 */
429 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480
432 typedef void *xengnttab_grant_copy_segment_t;
434 static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
435 xengnttab_grant_copy_segment_t *segs)
437 return -ENOSYS;
439 #endif
441 #endif /* QEMU_HW_XEN_COMMON_H */