/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
/*
 * set_bits_ll - set @mask_to_set in *@addr with a lockless cmpxchg loop.
 * Fails with -EBUSY if any of the requested bits are already set.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}
/*
 * clear_bits_ll - clear @mask_to_clear in *@addr with a lockless cmpxchg
 * loop.  Fails with -EBUSY unless all of the requested bits are set.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one of them will return the number of
 * remaining bits, otherwise it returns 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	/* Whole words first, one cmpxchg-protected word at a time. */
	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	/* Then the partial trailing word, if any. */
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
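
/*
 * Worked example: with BITS_PER_LONG == 64, setting nr = 70 bits at
 * start = 60 touches three words: bits 60-63 of word 0 via
 * BITMAP_FIRST_WORD_MASK(60), all of word 1 via ~0UL, and bits 0-1 of
 * word 2 via ~0UL & BITMAP_LAST_WORD_MASK(130).  Each word is claimed
 * by an independent cmpxchg, so a racing loser can be left holding a
 * partially set range; the nonzero return value is the number of bits
 * still unset, from which the caller (gen_pool_alloc() below) derives
 * how many bits it must roll back.
 */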
/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one of them will return the number of
 * remaining bits, otherwise it returns 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		/* Catch leaked allocations: no bit may still be set. */
		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/* Lost a race: roll back the partial set and retry. */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
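
/*
 * Usage sketch, continuing the hypothetical sram_pool example above.
 * A failed allocation returns 0, so 0 must never be a valid address
 * inside any chunk added to the pool:
 *
 *	unsigned long vaddr;
 *	phys_addr_t paddr;
 *
 *	vaddr = gen_pool_alloc(sram_pool, 256);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	paddr = gen_pool_virt_to_phys(sram_pool, vaddr);
 *	...
 *	gen_pool_free(sram_pool, vaddr, 256);
 */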
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);
/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk->end_addr - chunk->start_addr;
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
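
/*
 * Example (hypothetical): switch a pool to the best-fit search defined
 * below; no private data is needed, so @data is NULL:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */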
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
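
/*
 * Worked example: for the 16-bit map 0000111100110000 (bit 0 is the
 * rightmost), the free holes are bits 0-3, 6-7 and 12-15.  With nr = 2,
 * gen_pool_first_fit() returns bit 0, while gen_pool_best_fit() keeps
 * scanning, finds the exact-length hole at bits 6-7 and returns early
 * through the len == nr shortcut.
 */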