#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user supplied cflags might say. They
 * must be undefined before including xenctrl.h
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>

#include "hw/xen/interface/io/xenbus.h"

#include "hw/xen/xen.h"
#include "hw/pci/pci.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;

/*
 * We don't support Xen prior to 4.2.0.
 */

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;
typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
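
/*
 * Illustrative sketch (not part of the original header): in this compat
 * branch (Xen before 4.7.1) the macros above let code written against the
 * split libxenevtchn/libxengnttab APIs compile against the monolithic
 * libxenctrl.  A hypothetical caller ("domid" and "remote_port" are made-up
 * values):
 *
 *     xenevtchn_handle *xeh = xenevtchn_open(NULL, 0);
 *     xenevtchn_port_or_error_t port =
 *         xenevtchn_bind_interdomain(xeh, domid, remote_port);
 *     if (port >= 0) {
 *         xenevtchn_notify(xeh, port);
 *         xenevtchn_unbind(xeh, port);
 *     }
 *     xenevtchn_close(xeh);
 *
 * Each call expands to the corresponding xc_evtchn_* function here; on newer
 * Xen the real libxenevtchn entry points are used instead.
 */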

#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)

static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
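
/*
 * Usage sketch (illustrative, not from the original header): map one guest
 * frame into the device model's address space and unmap it again.  "domid"
 * and "gfn" are hypothetical caller-supplied values; xen_fmem is declared
 * further down in this header.
 *
 *     xen_pfn_t gfn_list[1] = { gfn };
 *     int map_err[1];
 *     void *va = xenforeignmemory_map(xen_fmem, domid, PROT_READ | PROT_WRITE,
 *                                     1, gfn_list, map_err);
 *     if (va && !map_err[0]) {
 *         // read or write the page through va, then:
 *         xenforeignmemory_unmap(xen_fmem, va, 1);
 *     }
 */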

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

extern xenforeignmemory_handle *xen_fmem;

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100

static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size,
    uint64_t src_gfn, uint64_t dst_gfn)
{
    uint32_t i;
    int rc;

    for (i = 0; i < size; i++) {
        unsigned long idx = src_gfn + i;
        xen_pfn_t gpfn = dst_gfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
                                      gpfn);
        if (rc) {
            return rc;
        }
    }

    return 0;
}
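
/*
 * Illustrative example (not from the original header): relocating a single
 * frame through the compat helper above amounts to one XENMAPSPACE_gmfn
 * add-to-physmap call.  "domid", "old_gfn" and "new_gfn" are hypothetical.
 *
 *     xendevicemodel_relocate_memory(xen_dmod, domid, 1, old_gfn, new_gfn);
 */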

static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
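
/*
 * Illustrative sketch (not part of the original header): on Xen older than
 * 4.9 the wrappers above forward to the xc_hvm_* calls, so an ioreq server
 * round trip looks the same as with real libxendevicemodel.  "dom" is a
 * hypothetical domain id.
 *
 *     ioservid_t id;
 *     xen_pfn_t ioreq_pfn, bufioreq_pfn;
 *     evtchn_port_t bufioreq_port;
 *
 *     xendevicemodel_create_ioreq_server(xen_dmod, dom,
 *                                        HVM_IOREQSRV_BUFIOREQ_ATOMIC, &id);
 *     xendevicemodel_get_ioreq_server_info(xen_dmod, dom, id, &ioreq_pfn,
 *                                          &bufioreq_pfn, &bufioreq_port);
 *     xendevicemodel_set_ioreq_server_state(xen_dmod, dom, id, 1);
 *     ... service I/O requests ...
 *     xendevicemodel_destroy_ioreq_server(xen_dmod, dom, id);
 */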

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 */

extern xendevicemodel_handle *xen_dmod;

static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

static inline int xen_restrict(domid_t domid)
{
    int rc;

    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);

    return rc;
}
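
/*
 * Usage sketch (illustrative, not from the original header): once all Xen
 * handles are open, the device model gives up its privileges over other
 * domains.  A caller might treat ENOTTY, which the pre-4.10 compat stub for
 * xentoolcore_restrict_all() above sets, as "restriction not supported":
 *
 *     if (xen_restrict(domid) < 0 && errno != ENOTTY) {
 *         // handle the failure
 *     }
 */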

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;

    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }

    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif

static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                     xen_pfn_t *ioreq_pfn,
                                                     xen_pfn_t *bufioreq_pfn,
                                                     evtchn_port_t
                                                     *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn, bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

/* Xen 4.5 */
#else

static bool use_default_ioreq_server;
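
/*
 * use_default_ioreq_server is set when creating an explicit ioreq server
 * fails (see xen_create_ioreq_server below).  In that case the map/unmap
 * helpers that follow become no-ops and the hypervisor's default ioreq
 * server is used instead.
 */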

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500 */

/* Xen before 4.8 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800

struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;
        struct {
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
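
/*
 * Illustrative sketch (not from the original header): with Xen 4.8 or newer,
 * where libxengnttab provides a real xengnttab_grant_copy(), a single segment
 * copying "len" bytes out of a remote grant into a local buffer might be set
 * up as below ("xgt", "ref", "remote_domid", "buf" and "len" are
 * hypothetical).  On older Xen the stub above simply fails with -ENOSYS.
 *
 *     xengnttab_grant_copy_segment_t seg;
 *
 *     seg.flags = GNTCOPY_source_gref;
 *     seg.source.foreign.ref = ref;
 *     seg.source.foreign.domid = remote_domid;
 *     seg.source.foreign.offset = 0;
 *     seg.dest.virt = buf;
 *     seg.len = len;
 *
 *     if (xengnttab_grant_copy(xgt, 1, &seg) == 0 &&
 *         seg.status == GNTST_okay) {
 *         // the data is now in buf
 *     }
 */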

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800 */

#endif /* QEMU_HW_XEN_COMMON_H */