/*
 * xen: use libxendevicemodel when available
 * [qemu.git] include/hw/xen/xen_common.h
 * blob b1f5f53e35a9dec9583c8468697d6a9403c25f4e
 */
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
4 /*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include <xen/io/xenbus.h>
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "hw/xen/trace.h"
23 extern xc_interface *xen_xc;
/*
 * We don't support Xen prior to 4.2.0.
 */
29 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 490
31 typedef xc_interface xendevicemodel_handle;
33 static inline xendevicemodel_handle *xendevicemodel_open(
34 struct xentoollog_logger *logger, unsigned int open_flags)
36 return xen_xc;
39 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 450
41 static inline int xendevicemodel_create_ioreq_server(
42 xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
43 ioservid_t *id)
45 return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
46 id);
49 static inline int xendevicemodel_get_ioreq_server_info(
50 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
51 xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
52 evtchn_port_t *bufioreq_port)
54 return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
55 bufioreq_pfn, bufioreq_port);
58 static inline int xendevicemodel_map_io_range_to_ioreq_server(
59 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
60 uint64_t start, uint64_t end)
62 return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
63 start, end);
66 static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
67 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
68 uint64_t start, uint64_t end)
70 return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
71 start, end);
74 static inline int xendevicemodel_map_pcidev_to_ioreq_server(
75 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
76 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
78 return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
79 bus, device, function);
82 static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
83 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
84 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
86 return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
87 bus, device, function);
90 static inline int xendevicemodel_destroy_ioreq_server(
91 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
93 return xc_hvm_destroy_ioreq_server(dmod, domid, id);
96 static inline int xendevicemodel_set_ioreq_server_state(
97 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
99 return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
102 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 450 */
104 static inline int xendevicemodel_set_pci_intx_level(
105 xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
106 uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
108 return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
109 intx, level);
112 static inline int xendevicemodel_set_isa_irq_level(
113 xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
114 unsigned int level)
116 return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
119 static inline int xendevicemodel_set_pci_link_route(
120 xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
122 return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
125 static inline int xendevicemodel_inject_msi(
126 xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
127 uint32_t msi_data)
129 return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
132 static inline int xendevicemodel_track_dirty_vram(
133 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
134 uint32_t nr, unsigned long *dirty_bitmap)
136 return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
137 dirty_bitmap);
140 static inline int xendevicemodel_modified_memory(
141 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
142 uint32_t nr)
144 return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
147 static inline int xendevicemodel_set_mem_type(
148 xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
149 uint64_t first_pfn, uint32_t nr)
151 return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
154 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 490 */
156 #undef XC_WANT_COMPAT_DEVICEMODEL_API
157 #include <xendevicemodel.h>
159 #endif
161 extern xendevicemodel_handle *xen_dmod;
163 static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
164 uint64_t first_pfn, uint32_t nr)
166 return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
167 nr);
170 static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
171 uint8_t bus, uint8_t device,
172 uint8_t intx, unsigned int level)
174 return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
175 device, intx, level);
178 static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
179 uint8_t irq)
181 return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
184 static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
185 uint32_t msi_data)
187 return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
190 static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
191 unsigned int level)
193 return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
196 static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
197 uint32_t nr, unsigned long *bitmap)
199 return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
200 bitmap);
203 static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
204 uint32_t nr)
206 return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
209 /* Xen 4.2 through 4.6 */
210 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
212 typedef xc_interface xenforeignmemory_handle;
213 typedef xc_evtchn xenevtchn_handle;
214 typedef xc_gnttab xengnttab_handle;
216 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
217 #define xenevtchn_close(h) xc_evtchn_close(h)
218 #define xenevtchn_fd(h) xc_evtchn_fd(h)
219 #define xenevtchn_pending(h) xc_evtchn_pending(h)
220 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
221 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
222 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
223 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
225 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
226 #define xengnttab_close(h) xc_gnttab_close(h)
227 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
228 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
229 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
230 #define xengnttab_map_grant_refs(h, c, d, r, p) \
231 xc_gnttab_map_grant_refs(h, c, d, r, p)
232 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
233 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
235 #define xenforeignmemory_open(l, f) xen_xc
236 #define xenforeignmemory_close(h)
238 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
239 int prot, size_t pages,
240 const xen_pfn_t arr[/*pages*/],
241 int err[/*pages*/])
243 if (err)
244 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
245 else
246 return xc_map_foreign_pages(h, dom, prot, arr, pages);
249 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
251 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
253 #include <xenevtchn.h>
254 #include <xengnttab.h>
255 #include <xenforeignmemory.h>
257 #endif
259 extern xenforeignmemory_handle *xen_fmem;
261 void destroy_hvm_domain(bool reboot);
263 /* shutdown/destroy current domain because of an error */
264 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
266 #ifdef HVM_PARAM_VMPORT_REGS_PFN
267 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
268 xen_pfn_t *vmport_regs_pfn)
270 int rc;
271 uint64_t value;
272 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
273 if (rc >= 0) {
274 *vmport_regs_pfn = (xen_pfn_t) value;
276 return rc;
278 #else
279 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
280 xen_pfn_t *vmport_regs_pfn)
282 return -ENOSYS;
284 #endif
/* Xen before 4.6: headers lack HVM_IOREQSRV_BUFIOREQ_ATOMIC; supply the
 * ABI constant ourselves so the 4.5 code path compiles. */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
295 static inline int xen_get_default_ioreq_server_info(domid_t dom,
296 xen_pfn_t *ioreq_pfn,
297 xen_pfn_t *bufioreq_pfn,
298 evtchn_port_t
299 *bufioreq_evtchn)
301 unsigned long param;
302 int rc;
304 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
305 if (rc < 0) {
306 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
307 return -1;
310 *ioreq_pfn = param;
312 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
313 if (rc < 0) {
314 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
315 return -1;
318 *bufioreq_pfn = param;
320 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
321 &param);
322 if (rc < 0) {
323 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
324 return -1;
327 *bufioreq_evtchn = param;
329 return 0;
332 /* Xen before 4.5 */
333 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
335 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
336 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
337 #endif
339 #define IOREQ_TYPE_PCI_CONFIG 2
341 typedef uint16_t ioservid_t;
343 static inline void xen_map_memory_section(domid_t dom,
344 ioservid_t ioservid,
345 MemoryRegionSection *section)
349 static inline void xen_unmap_memory_section(domid_t dom,
350 ioservid_t ioservid,
351 MemoryRegionSection *section)
355 static inline void xen_map_io_section(domid_t dom,
356 ioservid_t ioservid,
357 MemoryRegionSection *section)
361 static inline void xen_unmap_io_section(domid_t dom,
362 ioservid_t ioservid,
363 MemoryRegionSection *section)
367 static inline void xen_map_pcidev(domid_t dom,
368 ioservid_t ioservid,
369 PCIDevice *pci_dev)
373 static inline void xen_unmap_pcidev(domid_t dom,
374 ioservid_t ioservid,
375 PCIDevice *pci_dev)
379 static inline void xen_create_ioreq_server(domid_t dom,
380 ioservid_t *ioservid)
384 static inline void xen_destroy_ioreq_server(domid_t dom,
385 ioservid_t ioservid)
389 static inline int xen_get_ioreq_server_info(domid_t dom,
390 ioservid_t ioservid,
391 xen_pfn_t *ioreq_pfn,
392 xen_pfn_t *bufioreq_pfn,
393 evtchn_port_t *bufioreq_evtchn)
395 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
396 bufioreq_pfn,
397 bufioreq_evtchn);
400 static inline int xen_set_ioreq_server_state(domid_t dom,
401 ioservid_t ioservid,
402 bool enable)
404 return 0;
407 /* Xen 4.5 */
408 #else
410 static bool use_default_ioreq_server;
412 static inline void xen_map_memory_section(domid_t dom,
413 ioservid_t ioservid,
414 MemoryRegionSection *section)
416 hwaddr start_addr = section->offset_within_address_space;
417 ram_addr_t size = int128_get64(section->size);
418 hwaddr end_addr = start_addr + size - 1;
420 if (use_default_ioreq_server) {
421 return;
424 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
425 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
426 start_addr, end_addr);
429 static inline void xen_unmap_memory_section(domid_t dom,
430 ioservid_t ioservid,
431 MemoryRegionSection *section)
433 hwaddr start_addr = section->offset_within_address_space;
434 ram_addr_t size = int128_get64(section->size);
435 hwaddr end_addr = start_addr + size - 1;
437 if (use_default_ioreq_server) {
438 return;
441 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
442 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
443 1, start_addr, end_addr);
446 static inline void xen_map_io_section(domid_t dom,
447 ioservid_t ioservid,
448 MemoryRegionSection *section)
450 hwaddr start_addr = section->offset_within_address_space;
451 ram_addr_t size = int128_get64(section->size);
452 hwaddr end_addr = start_addr + size - 1;
454 if (use_default_ioreq_server) {
455 return;
458 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
459 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
460 start_addr, end_addr);
463 static inline void xen_unmap_io_section(domid_t dom,
464 ioservid_t ioservid,
465 MemoryRegionSection *section)
467 hwaddr start_addr = section->offset_within_address_space;
468 ram_addr_t size = int128_get64(section->size);
469 hwaddr end_addr = start_addr + size - 1;
471 if (use_default_ioreq_server) {
472 return;
475 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
476 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
477 0, start_addr, end_addr);
480 static inline void xen_map_pcidev(domid_t dom,
481 ioservid_t ioservid,
482 PCIDevice *pci_dev)
484 if (use_default_ioreq_server) {
485 return;
488 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
489 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
490 xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
491 pci_bus_num(pci_dev->bus),
492 PCI_SLOT(pci_dev->devfn),
493 PCI_FUNC(pci_dev->devfn));
496 static inline void xen_unmap_pcidev(domid_t dom,
497 ioservid_t ioservid,
498 PCIDevice *pci_dev)
500 if (use_default_ioreq_server) {
501 return;
504 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
505 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
506 xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
507 pci_bus_num(pci_dev->bus),
508 PCI_SLOT(pci_dev->devfn),
509 PCI_FUNC(pci_dev->devfn));
512 static inline void xen_create_ioreq_server(domid_t dom,
513 ioservid_t *ioservid)
515 int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
516 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
517 ioservid);
519 if (rc == 0) {
520 trace_xen_ioreq_server_create(*ioservid);
521 return;
524 *ioservid = 0;
525 use_default_ioreq_server = true;
526 trace_xen_default_ioreq_server();
529 static inline void xen_destroy_ioreq_server(domid_t dom,
530 ioservid_t ioservid)
532 if (use_default_ioreq_server) {
533 return;
536 trace_xen_ioreq_server_destroy(ioservid);
537 xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
540 static inline int xen_get_ioreq_server_info(domid_t dom,
541 ioservid_t ioservid,
542 xen_pfn_t *ioreq_pfn,
543 xen_pfn_t *bufioreq_pfn,
544 evtchn_port_t *bufioreq_evtchn)
546 if (use_default_ioreq_server) {
547 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
548 bufioreq_pfn,
549 bufioreq_evtchn);
552 return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
553 ioreq_pfn, bufioreq_pfn,
554 bufioreq_evtchn);
557 static inline int xen_set_ioreq_server_state(domid_t dom,
558 ioservid_t ioservid,
559 bool enable)
561 if (use_default_ioreq_server) {
562 return 0;
565 trace_xen_ioreq_server_state(ioservid, enable);
566 return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
567 enable);
570 #endif
572 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
573 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
574 unsigned int space,
575 unsigned long idx,
576 xen_pfn_t gpfn)
578 return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
580 #else
581 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
582 unsigned int space,
583 unsigned long idx,
584 xen_pfn_t gpfn)
586 /* In Xen 4.6 rc is -1 and errno contains the error value. */
587 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
588 if (rc == -1)
589 return errno;
590 return rc;
592 #endif
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
/* Paper over the xc_domain_create() signature change in Xen 4.7 (it gained
 * an xc_domain_configuration_t parameter, which we pass as NULL). */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
612 /* Xen before 4.8 */
614 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480
617 typedef void *xengnttab_grant_copy_segment_t;
619 static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
620 xengnttab_grant_copy_segment_t *segs)
622 return -ENOSYS;
624 #endif
626 #endif /* QEMU_HW_XEN_COMMON_H */