// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>

static unsigned long iommu_large_alloc = 15;

static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
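
/*
 * Descriptive note (added, not in the original source): allocations of more
 * than iommu_large_alloc entries are served from the dedicated large pool
 * when one was configured at init time.  The per-CPU iommu_hash_common value
 * gives each CPU a stable "home" pool, so concurrent allocators tend to
 * contend on different pool locks.
 */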
static inline bool need_flush(struct iommu_map_table *iommu)
{
	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
	iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
	iommu->flags &= ~IOMMU_NEED_FLUSH;
}
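
/*
 * Descriptive note (added, not in the original source): IOMMU_NEED_FLUSH is
 * a hint for the optional lazy_flush callback.  It is set whenever an
 * allocation pass wraps a pool's hint back to the pool start, and cleared
 * once iommu_tbl_range_alloc() has invoked lazy_flush().
 */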
static void setup_iommu_pool_hash(void)
{
	unsigned int i;
	static bool do_once;

	if (do_once)
		return;
	do_once = true;
	for_each_possible_cpu(i)
		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}
/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
			 unsigned long num_entries,
			 u32 table_shift,
			 void (*lazy_flush)(struct iommu_map_table *),
			 bool large_pool, u32 npools,
			 bool skip_span_boundary_check)
{
	unsigned int start, i;
	struct iommu_pool *p = &(iommu->large_pool);

	setup_iommu_pool_hash();
	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;
	BUG_ON(npools > IOMMU_NR_POOLS);

	iommu->table_shift = table_shift;
	iommu->lazy_flush = lazy_flush;
	start = 0;
	if (skip_span_boundary_check)
		iommu->flags |= IOMMU_NO_SPAN_BOUND;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->pools[i].lock));
		iommu->pools[i].start = start;
		iommu->pools[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->pools[i].end = start - 1;
	}
	if (!large_pool)
		return;
	/* initialize large_pool */
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
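
/*
 * Illustrative sketch (added, not from the original source): a typical
 * caller embeds a struct iommu_map_table in its own per-device state,
 * points ->map at a bitmap with one bit per table entry, sets
 * ->table_map_base, and then calls iommu_tbl_pool_init().  The names below
 * (my_iommu_state, my_flush_tlb, MY_NUM_ENTRIES, MY_SHIFT) are hypothetical.
 *
 *	struct my_iommu_state {
 *		struct iommu_map_table	tbl;
 *		...
 *	};
 *
 *	st->tbl.table_map_base = base_dma_addr;
 *	st->tbl.map = my_bitmap;	// MY_NUM_ENTRIES bits, zeroed
 *	iommu_tbl_pool_init(&st->tbl, MY_NUM_ENTRIES, MY_SHIFT,
 *			    my_flush_tlb, true, 0, false);
 */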
unsigned long iommu_tbl_range_alloc(struct device *dev,
				struct iommu_map_table *iommu,
				unsigned long npages,
				unsigned long *handle,
				unsigned long mask,
				unsigned int align_order)
{
	unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *pool;
	int pass = 0;
	unsigned int pool_nr;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > iommu_large_alloc);
	unsigned long shift;
	unsigned long align_mask = 0;

	if (align_order > 0)
		align_mask = ~0ul >> (BITS_PER_LONG - align_order);

	/* Sanity check */
	if (unlikely(npages == 0)) {
		WARN_ON_ONCE(1);
		return IOMMU_ERROR_CODE;
	}

	if (largealloc) {
		pool = &(iommu->large_pool);
		pool_nr = 0; /* to keep compiler happy */
	} else {
		/* pick out pool_nr */
		pool_nr = pool_hash & (npools - 1);
		pool = &(iommu->pools[pool_nr]);
	}
	spin_lock_irqsave(&pool->lock, flags);

 again:
	if (pass == 0 && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning. If a
	 * flush is needed, it will get done based on the return value
	 * from iommu_area_alloc() below.
	 */
	if (start >= limit)
		start = pool->start;

	shift = iommu->table_map_base >> iommu->table_shift;
	if (limit + shift > mask) {
		limit = mask - shift + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(iommu->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << iommu->table_shift);
	else
		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);

	boundary_size = boundary_size >> iommu->table_shift;
	/*
	 * if the skip_span_boundary_check had been set during init, we set
	 * things up so that iommu_is_span_boundary() merely checks if the
	 * (index + npages) < num_tsb_entries
	 */
	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	}
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure, rescan from the beginning. */
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			pool = &(iommu->pools[pool_nr]);
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else {
			/* give up */
			n = IOMMU_ERROR_CODE;
			goto bail;
		}
	}
	if (iommu->lazy_flush &&
	    (n < pool->hint || need_flush(iommu))) {
		clear_flush(iommu);
		iommu->lazy_flush(iommu);
	}

	end = n + npages;
	pool->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

 bail:
	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
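
/*
 * Illustrative sketch (added, not from the original source): the return
 * value is an entry index into the table, which the caller converts to a
 * DMA address using the geometry it configured at init time; IOMMU_ERROR_CODE
 * signals failure.  The variable names below are hypothetical.
 *
 *	entry = iommu_tbl_range_alloc(dev, &st->tbl, npages, NULL, ~0ul, 0);
 *	if (entry == IOMMU_ERROR_CODE)
 *		goto out_err;
 *	dma_addr = st->tbl.table_map_base + (entry << st->tbl.table_shift);
 */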
static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

	/* The large pool is the last pool at the top of the table */
	if (large_pool && entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}
	return p;
}
/* Caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
			  unsigned long npages, unsigned long entry)
{
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
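
/*
 * Illustrative sketch (added, not from the original source), matching the
 * allocation example above: with the default addr->entry mapping the caller
 * only needs the DMA address and length, and passes IOMMU_ERROR_CODE as
 * `entry' to select that default.
 *
 *	iommu_tbl_range_free(&st->tbl, dma_addr, npages, IOMMU_ERROR_CODE);
 */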