xen/mapcache: introduce xen_replace_cache_entry()
include/hw/xen/xen_common.h
#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have a new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user-supplied cflags might say. They
 * must be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>
#include <xen/io/xenbus.h>

#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/pci/pci.h"
#include "qemu/queue.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;

/*
 * We don't support Xen prior to 4.2.0.
 */

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)

static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap(p, (s) * XC_PAGE_SIZE)

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

extern xenforeignmemory_handle *xen_fmem;
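
/*
 * Editor's illustration, not part of the upstream header: with the compat
 * shims above, foreign-memory mapping has the same shape on every supported
 * Xen release. The helper below is a hypothetical sketch of the calling
 * convention (it assumes PROT_READ via the usual QEMU osdep includes) and is
 * not an API used elsewhere in QEMU.
 */
static inline void *xen_example_map_one_page(uint32_t dom, xen_pfn_t pfn)
{
    int err = 0;

    /* Map a single read-only guest frame; err receives a per-page status. */
    void *p = xenforeignmemory_map(xen_fmem, dom, PROT_READ, 1, &pfn, &err);

    if (p != NULL && err != 0) {
        /* The single page slot failed to map; release the region again. */
        xenforeignmemory_unmap(xen_fmem, p, 1);
        p = NULL;
    }
    return p;
}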
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

#endif
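
/*
 * Editor's note: xenforeignmemory_map2() differs from xenforeignmemory_map()
 * only in the extra addr/flags arguments, which is why the fallback above can
 * simply assert them away. A call that works through either path might look
 * like this (hypothetical sketch; nb_pfn, pfns and errs are caller-supplied):
 *
 *     void *p = xenforeignmemory_map2(xen_fmem, dom, NULL, PROT_READ, 0,
 *                                     nb_pfn, pfns, errs);
 */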
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq, id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

static inline int xendevicemodel_restrict(
    xendevicemodel_handle *dmod, domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xenforeignmemory_restrict(
    xenforeignmemory_handle *fmem, domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif

extern xendevicemodel_handle *xen_dmod;

static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

static inline int xen_restrict(domid_t domid)
{
    int rc;

    /* Attempt to restrict devicemodel operations */
    rc = xendevicemodel_restrict(xen_dmod, domid);
    trace_xen_domid_restrict(rc ? errno : 0);

    if (rc < 0) {
        /*
         * If errno is ENOTTY then restriction is not implemented so
         * there's no point in trying to restrict other types of
         * operation, but it should not be treated as a failure.
         */
        if (errno == ENOTTY) {
            return 0;
        }

        return rc;
    }

    /* Restrict foreignmemory operations */
    rc = xenforeignmemory_restrict(xen_fmem, domid);
    trace_xen_domid_restrict(rc ? errno : 0);

    return rc;
}
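
/*
 * Editor's note: a 0 return from xen_restrict() means either that both
 * handles were restricted or that restriction is unimplemented on this host
 * (the ENOTTY case above). A hypothetical call site would therefore only
 * fail hard on a negative return:
 *
 *     if (xen_restrict(xen_domid) < 0) {
 *         error_report("failed to restrict Xen handles: %s",
 *                      strerror(errno));
 *         exit(1);
 *     }
 */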
void destroy_hvm_domain(bool reboot);

/* Shut down or destroy the current domain because of an error. */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif

static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}
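
/*
 * Editor's illustration (hypothetical caller): on toolstacks without ioreq
 * server support, the three HVM params read above fully describe the single
 * default ioreq server:
 *
 *     xen_pfn_t ioreq_pfn, bufioreq_pfn;
 *     evtchn_port_t bufioreq_evtchn;
 *
 *     if (xen_get_default_ioreq_server_info(dom, &ioreq_pfn, &bufioreq_pfn,
 *                                           &bufioreq_evtchn) < 0) {
 *         return -1;
 *     }
 */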
/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

/* Xen 4.5 */
#else

static bool use_default_ioreq_server;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_bus_num(pci_dev->bus),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_bus_num(pci_dev->bus),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

#endif
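
/*
 * Editor's sketch (assumed typical call sequence, not upstream code): a
 * device model would bring an ioreq server up roughly as below. On hosts
 * older than Xen 4.5 the same calls compile down to the no-op/default-server
 * stubs earlier in this header, so the sequence is version-independent.
 */
static inline int xen_example_ioreq_server_setup(domid_t dom,
                                                 ioservid_t *ioservid,
                                                 xen_pfn_t *ioreq_pfn,
                                                 xen_pfn_t *bufioreq_pfn,
                                                 evtchn_port_t *bufioreq_evtchn)
{
    int rc;

    /* May silently fall back to the default ioreq server. */
    xen_create_ioreq_server(dom, ioservid);

    rc = xen_get_ioreq_server_info(dom, *ioservid, ioreq_pfn,
                                   bufioreq_pfn, bufioreq_evtchn);
    if (rc < 0) {
        return rc;
    }

    /* The caller maps the returned pages before enabling the server. */
    return xen_set_ioreq_server_state(dom, *ioservid, true);
}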
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
#else
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    /* In Xen 4.6 rc is -1 and errno contains the error value. */
    int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
    if (rc == -1) {
        return errno;
    }
    return rc;
}
#endif
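
/*
 * Editor's note: the #else branch above folds the Xen 4.6+ convention
 * (rc == -1 with the error in errno) back into the return value, so callers
 * can test rc uniformly across versions (hypothetical sketch):
 *
 *     rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn,
 *                                       idx, gpfn);
 *     if (rc) {
 *         error_report("add_to_physmap failed: %d", rc);
 *     }
 */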
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif

/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800

typedef void *xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif

#endif /* QEMU_HW_XEN_COMMON_H */