/*
 * DMA memory preregistration
 *
 * Authors:
 *  Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/hw.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "trace.h"
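
/*
 * With the sPAPR TCE IOMMU v2, guest RAM that may be used for DMA has to be
 * registered with the kernel (which accounts for and pins it) before any
 * TCE can map it; the memory listener below performs that preregistration
 * as RAM sections are added to and removed from the address space.
 */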
static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
{
    if (memory_region_is_iommu(section->mr)) {
        hw_error("Cannot possibly preregister IOMMU memory");
    }

    return !memory_region_is_ram(section->mr) ||
           memory_region_is_ram_device(section->mr);
}
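
/*
 * Translate a guest physical address within @section into the QEMU
 * userspace virtual address that backs it; this is the address the kernel
 * expects in the registration ioctls below.
 */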
static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
{
    return memory_region_get_ram_ptr(section->mr) +
        section->offset_within_region +
        (gpa - section->offset_within_address_space);
}
static void vfio_prereg_listener_region_add(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            prereg_listener);
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask;
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    memory_region_ref(section->mr);

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
    trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);
    if (ret) {
        /*
         * On the initfn path, store the first error in the container so we
         * can gracefully fail. At runtime, there's not much we can do other
         * than throw a hardware error.
         */
        if (!container->initialized) {
            if (!container->error) {
                error_setg_errno(&container->error, -ret,
                                 "Memory registering failed");
            }
        } else {
            hw_error("vfio: Memory registering failed, unable to continue");
        }
    }
}
static void vfio_prereg_listener_region_del(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            prereg_listener);
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask;
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
    trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
}
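
/*
 * This listener is only registered (against address_space_memory, see
 * hw/vfio/common.c) for containers using the sPAPR TCE v2 IOMMU type;
 * other IOMMU types do not require preregistration.
 */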
const MemoryListener vfio_prereg_listener = {
    .region_add = vfio_prereg_listener_region_add,
    .region_del = vfio_prereg_listener_region_del,
};
int vfio_spapr_create_window(VFIOContainer *container,
                             MemoryRegionSection *section,
                             hwaddr *pgsize)
{
    int ret = 0;
    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
    uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask;
    unsigned entries, bits_total, bits_per_level, max_levels;
    struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
    long rampagesize = qemu_minrampagesize();

    /*
     * The host might not support the IOMMU page size that the guest
     * supports, so we will use smaller physical IOMMU pages to back them.
     */
    if (pagesize > rampagesize) {
        pagesize = rampagesize;
    }
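    /*
     * pagesize | (pagesize - 1) is a mask of every bit up to and including
     * the chosen page size, so pgmask keeps only the host-supported page
     * sizes that are not larger than it; the biggest remaining bit wins.
     * E.g. pagesize == 64K with container->pgsizes == 4K|64K|16M selects
     * 64K; a zero pgmask means no suitable host page size exists.
     */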
    pgmask = container->pgsizes & (pagesize | (pagesize - 1));
    pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
    if (!pagesize) {
        error_report("Host doesn't support page size 0x%"PRIx64
                     ", the supported mask is 0x%lx",
                     memory_region_iommu_get_min_page_size(iommu_mr),
                     container->pgsizes);
        return -EINVAL;
    }
    /*
     * FIXME: For VFIO iommu types which have KVM acceleration to
     * avoid bouncing all map/unmaps through qemu this way, this
     * would be the right place to wire that up (tell the KVM
     * device emulation the VFIO iommu handles to use).
     */
    create.window_size = int128_get64(section->size);
    create.page_shift = ctz64(pagesize);
    /*
     * The SPAPR host supports multilevel TCE tables. We try to guess an
     * optimal number of levels and, if this fails (for example due to host
     * memory fragmentation), we increase the levels. The DMA address
     * structure is:
     * rrrrrrrr rxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx iiiiiiii
     * where:
     *   r = reserved (bits >= 55 are reserved in the existing hardware)
     *   i = IOMMU page offset (64K in this example)
     *   x = bits to index a TCE which can be split into equal chunks to
     *       index within each level
     * The aim is to split "x" into the smallest possible number of levels.
     */
    entries = create.window_size >> create.page_shift;
    /* bits_total is the number of "x" bits needed */
    bits_total = ctz64(entries * sizeof(uint64_t));
    /*
     * bits_per_level is a safe guess of how much we can allocate per level:
     * 8 is the current minimum for CONFIG_FORCE_MAX_ZONEORDER and MAX_ORDER
     * is usually bigger than that.
     * Below we look at qemu_real_host_page_size as TCEs are allocated from
     * system pages.
     */
    bits_per_level = ctz64(qemu_real_host_page_size) + 8;
    create.levels = bits_total / bits_per_level;
    if (bits_total % bits_per_level) {
        ++create.levels;
    }
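    /*
     * E.g. a 1GB window with 64K IOMMU pages gives entries = 2^14 and
     * bits_total = ctz64(2^14 * 8) = 17; with 64K host pages,
     * bits_per_level = 16 + 8 = 24, so a single level is enough.
     */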
    max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size);
    for ( ; create.levels <= max_levels; ++create.levels) {
        ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
        if (!ret) {
            break;
        }
    }
    if (ret) {
        error_report("Failed to create a window, ret = %d (%m)", ret);
        return -errno;
    }
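    /*
     * The kernel chooses where the new window starts; if that does not
     * match the bus offset of the section the guest is using, give up and
     * drop the window again.
     */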
    if (create.start_addr != section->offset_within_address_space) {
        vfio_spapr_remove_window(container, create.start_addr);

        error_report("Host doesn't support DMA window at %"HWADDR_PRIx
                     ", must be %"PRIx64,
                     section->offset_within_address_space,
                     (uint64_t)create.start_addr);
        return -EINVAL;
    }
    trace_vfio_spapr_create_window(create.page_shift,
                                   create.levels,
                                   create.window_size,
                                   create.start_addr);
    *pgsize = pagesize;

    return 0;
}
int vfio_spapr_remove_window(VFIOContainer *container,
                             hwaddr offset_within_address_space)
{
    struct vfio_iommu_spapr_tce_remove remove = {
        .argsz = sizeof(remove),
        .start_addr = offset_within_address_space,
    };
    int ret;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
    if (ret) {
        error_report("Failed to remove window at %"PRIx64,
                     (uint64_t)remove.start_addr);
        return -errno;
    }

    trace_vfio_spapr_remove_window(offset_within_address_space);

    return 0;
}