/*
 * QEMU Xen common compatibility header (include/hw/xen/xen_common.h).
 * Provides shims mapping newer libxen* APIs onto older libxenctrl releases.
 */
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
4 /*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include "hw/xen/interface/io/xenbus.h"
17 #include "hw/xen/xen.h"
18 #include "hw/pci/pci.h"
19 #include "hw/xen/trace.h"
21 extern xc_interface *xen_xc;
24 * We don't support Xen prior to 4.2.0.
27 /* Xen 4.2 through 4.6 */
28 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
30 typedef xc_interface xenforeignmemory_handle;
31 typedef xc_evtchn xenevtchn_handle;
32 typedef xc_gnttab xengnttab_handle;
33 typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
/*
 * Compat wrappers: forward the Xen 4.7+ API names to the pre-4.7 xc_*
 * equivalents.  Note: no trailing semicolon in the expansions — the
 * original xenevtchn_open definition carried a stray ';' which would
 * break use in expression context; callers supply their own semicolon.
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

/* Pre-4.7 has no separate foreignmemory library: reuse the global xc handle. */
#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
57 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
58 int prot, size_t pages,
59 const xen_pfn_t arr[/*pages*/],
60 int err[/*pages*/])
62 if (err)
63 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
64 else
65 return xc_map_foreign_pages(h, dom, prot, arr, pages);
68 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
70 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
72 #include <xenevtchn.h>
73 #include <xengnttab.h>
74 #include <xenforeignmemory.h>
76 #endif
78 extern xenforeignmemory_handle *xen_fmem;
80 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
82 typedef xc_interface xendevicemodel_handle;
84 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
86 #undef XC_WANT_COMPAT_DEVICEMODEL_API
87 #include <xendevicemodel.h>
89 #endif
91 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
93 static inline int xendevicemodel_relocate_memory(
94 xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
95 uint64_t dst_gfn)
97 uint32_t i;
98 int rc;
100 for (i = 0; i < size; i++) {
101 unsigned long idx = src_gfn + i;
102 xen_pfn_t gpfn = dst_gfn + i;
104 rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
105 gpfn);
106 if (rc) {
107 return rc;
111 return 0;
114 static inline int xendevicemodel_pin_memory_cacheattr(
115 xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
116 uint32_t type)
118 return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
/* Resource mapping does not exist before 4.11; define placeholder types. */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
128 static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
129 xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
130 unsigned int id, unsigned long frame, unsigned long nr_frames,
131 void **paddr, int prot, int flags)
133 errno = EOPNOTSUPP;
134 return NULL;
137 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
139 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
141 #define XEN_COMPAT_PHYSMAP
142 static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
143 uint32_t dom, void *addr,
144 int prot, int flags, size_t pages,
145 const xen_pfn_t arr[/*pages*/],
146 int err[/*pages*/])
148 assert(addr == NULL && flags == 0);
149 return xenforeignmemory_map(h, dom, prot, pages, arr, err);
152 static inline int xentoolcore_restrict_all(domid_t domid)
154 errno = ENOTTY;
155 return -1;
158 static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
159 domid_t domid, unsigned int reason)
161 errno = ENOTTY;
162 return -1;
165 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
167 #include <xentoolcore.h>
169 #endif
171 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
173 static inline xendevicemodel_handle *xendevicemodel_open(
174 struct xentoollog_logger *logger, unsigned int open_flags)
176 return xen_xc;
179 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
181 static inline int xendevicemodel_create_ioreq_server(
182 xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
183 ioservid_t *id)
185 return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
186 id);
189 static inline int xendevicemodel_get_ioreq_server_info(
190 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
191 xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
192 evtchn_port_t *bufioreq_port)
194 return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
195 bufioreq_pfn, bufioreq_port);
198 static inline int xendevicemodel_map_io_range_to_ioreq_server(
199 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
200 uint64_t start, uint64_t end)
202 return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
203 start, end);
206 static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
207 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
208 uint64_t start, uint64_t end)
210 return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
211 start, end);
214 static inline int xendevicemodel_map_pcidev_to_ioreq_server(
215 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
216 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
218 return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
219 bus, device, function);
222 static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
223 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
224 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
226 return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
227 bus, device, function);
230 static inline int xendevicemodel_destroy_ioreq_server(
231 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
233 return xc_hvm_destroy_ioreq_server(dmod, domid, id);
236 static inline int xendevicemodel_set_ioreq_server_state(
237 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
239 return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
242 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
244 static inline int xendevicemodel_set_pci_intx_level(
245 xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
246 uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
248 return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
249 intx, level);
252 static inline int xendevicemodel_set_isa_irq_level(
253 xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
254 unsigned int level)
256 return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
259 static inline int xendevicemodel_set_pci_link_route(
260 xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
262 return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
265 static inline int xendevicemodel_inject_msi(
266 xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
267 uint32_t msi_data)
269 return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
272 static inline int xendevicemodel_track_dirty_vram(
273 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
274 uint32_t nr, unsigned long *dirty_bitmap)
276 return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
277 dirty_bitmap);
280 static inline int xendevicemodel_modified_memory(
281 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
282 uint32_t nr)
284 return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
287 static inline int xendevicemodel_set_mem_type(
288 xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
289 uint64_t first_pfn, uint32_t nr)
291 return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
294 #endif
296 extern xendevicemodel_handle *xen_dmod;
298 static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
299 uint64_t first_pfn, uint32_t nr)
301 return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
302 nr);
305 static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
306 uint8_t bus, uint8_t device,
307 uint8_t intx, unsigned int level)
309 return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
310 device, intx, level);
313 static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
314 uint8_t irq)
316 return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
319 static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
320 uint32_t msi_data)
322 return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
325 static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
326 unsigned int level)
328 return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
331 static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
332 uint32_t nr, unsigned long *bitmap)
334 return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
335 bitmap);
338 static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
339 uint32_t nr)
341 return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
344 static inline int xen_restrict(domid_t domid)
346 int rc;
347 rc = xentoolcore_restrict_all(domid);
348 trace_xen_domid_restrict(rc ? errno : 0);
349 return rc;
352 void destroy_hvm_domain(bool reboot);
354 /* shutdown/destroy current domain because of an error */
355 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
357 #ifdef HVM_PARAM_VMPORT_REGS_PFN
358 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
359 xen_pfn_t *vmport_regs_pfn)
361 int rc;
362 uint64_t value;
363 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
364 if (rc >= 0) {
365 *vmport_regs_pfn = (xen_pfn_t) value;
367 return rc;
369 #else
370 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
371 xen_pfn_t *vmport_regs_pfn)
373 return -ENOSYS;
375 #endif
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
386 static inline int xen_get_default_ioreq_server_info(domid_t dom,
387 xen_pfn_t *ioreq_pfn,
388 xen_pfn_t *bufioreq_pfn,
389 evtchn_port_t
390 *bufioreq_evtchn)
392 unsigned long param;
393 int rc;
395 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
396 if (rc < 0) {
397 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
398 return -1;
401 *ioreq_pfn = param;
403 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
404 if (rc < 0) {
405 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
406 return -1;
409 *bufioreq_pfn = param;
411 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
412 &param);
413 if (rc < 0) {
414 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
415 return -1;
418 *bufioreq_evtchn = param;
420 return 0;
423 /* Xen before 4.5 */
424 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
426 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
427 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
428 #endif
430 #define IOREQ_TYPE_PCI_CONFIG 2
432 typedef uint16_t ioservid_t;
434 static inline void xen_map_memory_section(domid_t dom,
435 ioservid_t ioservid,
436 MemoryRegionSection *section)
440 static inline void xen_unmap_memory_section(domid_t dom,
441 ioservid_t ioservid,
442 MemoryRegionSection *section)
446 static inline void xen_map_io_section(domid_t dom,
447 ioservid_t ioservid,
448 MemoryRegionSection *section)
452 static inline void xen_unmap_io_section(domid_t dom,
453 ioservid_t ioservid,
454 MemoryRegionSection *section)
458 static inline void xen_map_pcidev(domid_t dom,
459 ioservid_t ioservid,
460 PCIDevice *pci_dev)
464 static inline void xen_unmap_pcidev(domid_t dom,
465 ioservid_t ioservid,
466 PCIDevice *pci_dev)
470 static inline void xen_create_ioreq_server(domid_t dom,
471 ioservid_t *ioservid)
475 static inline void xen_destroy_ioreq_server(domid_t dom,
476 ioservid_t ioservid)
480 static inline int xen_get_ioreq_server_info(domid_t dom,
481 ioservid_t ioservid,
482 xen_pfn_t *ioreq_pfn,
483 xen_pfn_t *bufioreq_pfn,
484 evtchn_port_t *bufioreq_evtchn)
486 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
487 bufioreq_pfn,
488 bufioreq_evtchn);
491 static inline int xen_set_ioreq_server_state(domid_t dom,
492 ioservid_t ioservid,
493 bool enable)
495 return 0;
498 /* Xen 4.5 */
499 #else
501 static bool use_default_ioreq_server;
503 static inline void xen_map_memory_section(domid_t dom,
504 ioservid_t ioservid,
505 MemoryRegionSection *section)
507 hwaddr start_addr = section->offset_within_address_space;
508 ram_addr_t size = int128_get64(section->size);
509 hwaddr end_addr = start_addr + size - 1;
511 if (use_default_ioreq_server) {
512 return;
515 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
516 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
517 start_addr, end_addr);
520 static inline void xen_unmap_memory_section(domid_t dom,
521 ioservid_t ioservid,
522 MemoryRegionSection *section)
524 hwaddr start_addr = section->offset_within_address_space;
525 ram_addr_t size = int128_get64(section->size);
526 hwaddr end_addr = start_addr + size - 1;
528 if (use_default_ioreq_server) {
529 return;
532 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
533 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
534 1, start_addr, end_addr);
537 static inline void xen_map_io_section(domid_t dom,
538 ioservid_t ioservid,
539 MemoryRegionSection *section)
541 hwaddr start_addr = section->offset_within_address_space;
542 ram_addr_t size = int128_get64(section->size);
543 hwaddr end_addr = start_addr + size - 1;
545 if (use_default_ioreq_server) {
546 return;
549 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
550 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
551 start_addr, end_addr);
554 static inline void xen_unmap_io_section(domid_t dom,
555 ioservid_t ioservid,
556 MemoryRegionSection *section)
558 hwaddr start_addr = section->offset_within_address_space;
559 ram_addr_t size = int128_get64(section->size);
560 hwaddr end_addr = start_addr + size - 1;
562 if (use_default_ioreq_server) {
563 return;
566 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
567 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
568 0, start_addr, end_addr);
571 static inline void xen_map_pcidev(domid_t dom,
572 ioservid_t ioservid,
573 PCIDevice *pci_dev)
575 if (use_default_ioreq_server) {
576 return;
579 trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
580 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
581 xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
582 pci_dev_bus_num(pci_dev),
583 PCI_SLOT(pci_dev->devfn),
584 PCI_FUNC(pci_dev->devfn));
587 static inline void xen_unmap_pcidev(domid_t dom,
588 ioservid_t ioservid,
589 PCIDevice *pci_dev)
591 if (use_default_ioreq_server) {
592 return;
595 trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
596 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
597 xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
598 pci_dev_bus_num(pci_dev),
599 PCI_SLOT(pci_dev->devfn),
600 PCI_FUNC(pci_dev->devfn));
603 static inline void xen_create_ioreq_server(domid_t dom,
604 ioservid_t *ioservid)
606 int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
607 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
608 ioservid);
610 if (rc == 0) {
611 trace_xen_ioreq_server_create(*ioservid);
612 return;
615 *ioservid = 0;
616 use_default_ioreq_server = true;
617 trace_xen_default_ioreq_server();
620 static inline void xen_destroy_ioreq_server(domid_t dom,
621 ioservid_t ioservid)
623 if (use_default_ioreq_server) {
624 return;
627 trace_xen_ioreq_server_destroy(ioservid);
628 xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
631 static inline int xen_get_ioreq_server_info(domid_t dom,
632 ioservid_t ioservid,
633 xen_pfn_t *ioreq_pfn,
634 xen_pfn_t *bufioreq_pfn,
635 evtchn_port_t *bufioreq_evtchn)
637 if (use_default_ioreq_server) {
638 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
639 bufioreq_pfn,
640 bufioreq_evtchn);
643 return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
644 ioreq_pfn, bufioreq_pfn,
645 bufioreq_evtchn);
648 static inline int xen_set_ioreq_server_state(domid_t dom,
649 ioservid_t ioservid,
650 bool enable)
652 if (use_default_ioreq_server) {
653 return 0;
656 trace_xen_ioreq_server_state(ioservid, enable);
657 return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
658 enable);
661 #endif
663 /* Xen before 4.8 */
665 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
667 struct xengnttab_grant_copy_segment {
668 union xengnttab_copy_ptr {
669 void *virt;
670 struct {
671 uint32_t ref;
672 uint16_t offset;
673 uint16_t domid;
674 } foreign;
675 } source, dest;
676 uint16_t len;
677 uint16_t flags;
678 int16_t status;
681 typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
683 static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
684 xengnttab_grant_copy_segment_t *segs)
686 return -ENOSYS;
688 #endif
690 #endif /* QEMU_HW_XEN_COMMON_H */