#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user supplied cflags might say. They
 * must be undefined before including xenctrl.h
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw/xen/xen.h"
#include "hw/pci/pci.h"
#include "qemu/queue.h"
#include "hw/xen/trace.h"

/*
 * We don't support Xen prior to 4.2.0.
 */

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

#define xenforeignmemory_open(l, f) xen_xc

static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
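
/*
 * In this compat path xenforeignmemory_map() simply forwards to
 * xc_map_foreign_bulk() when the caller wants per-page error reporting
 * (err != NULL) and to xc_map_foreign_pages() otherwise; unmapping is a
 * plain munmap() since no per-handle state is kept.
 */
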
#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;

    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
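
/*
 * Read the legacy HVM params describing the default ioreq server (the
 * "magic" ioreq pages and the buffered-ioreq event channel).  This is the
 * fallback used by xen_get_ioreq_server_info() when no dedicated ioreq
 * server is available.
 */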
static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
                                                    domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                        *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

#else /* Xen 4.5 and newer */

static bool use_default_ioreq_server;
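
/*
 * Xen 4.5 and newer expose the ioreq server API.  use_default_ioreq_server
 * is set when creating a dedicated ioreq server fails; the helpers below
 * then turn into no-ops and the default ioreq server described by the
 * legacy HVM params is used instead.
 */
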
static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
                                        start_addr, end_addr);
}
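
/*
 * The fourth argument of xc_hvm_map_io_range_to_ioreq_server() selects the
 * range type: 1 registers an MMIO range (as above), while the port I/O
 * helpers below pass 0.
 */
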
static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
                                            start_addr, end_addr);
}

static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
                                        start_addr, end_addr);
}

static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
                                            start_addr, end_addr);
}

static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
                                      0, pci_bus_num(pci_dev->bus),
                                      PCI_SLOT(pci_dev->devfn),
                                      PCI_FUNC(pci_dev->devfn));
}
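
/*
 * The literal 0 passed to xc_hvm_map_pcidev_to_ioreq_server() (and to the
 * unmap call below) appears to be the PCI segment number; only segment 0
 * is handled here.
 */
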
static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
                                          0, pci_bus_num(pci_dev->bus),
                                          PCI_SLOT(pci_dev->devfn),
                                          PCI_FUNC(pci_dev->devfn));
}

static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                        ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}

#endif
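
/*
 * xc_domain_add_to_physmap() changed its error reporting in Xen 4.6: older
 * versions return the error directly, while 4.6 returns -1 and sets errno.
 * Hence the two wrappers below.
 */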
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
#else
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    /* In Xen 4.6 rc is -1 and errno contains the error value. */
    int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
    return rc;
}
#endif
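
/*
 * Xen 4.7 added an extra argument to xc_domain_create(); it is not needed
 * here, so the newer wrapper simply passes NULL for it.
 */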
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
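
/*
 * libxengnttab gained xengnttab_grant_copy() in Xen 4.8.  For older
 * versions provide a placeholder segment type and a stub that reports
 * ENOSYS so callers can tell the operation is unavailable.
 */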
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480

typedef void *xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif

#endif /* QEMU_HW_XEN_COMMON_H */