/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, we treat it as a (void *)) with that id.  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE layers in a local pool),
 * so we don't need to go to the memory "store" during an id allocation
 * and you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
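
/*
 * Worked example (a sketch assuming the common configuration of a 32-bit
 * int and IDR_BITS == 8): MAX_IDR_SHIFT = 31, MAX_IDR_BIT = 1U << 31,
 * MAX_IDR_LEVEL = (31 + 8 - 1) / 8 = 4 layers (the top layer is only
 * partially used), and MAX_IDR_FREE = 8 cached idr_layers.
 */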
static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);
/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}
/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
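
/*
 * For example, assuming IDR_BITS == 8: idr_layer_prefix_mask(0) is
 * ~idr_max(1) = ~0xff, i.e. everything above the low 8 id bits, and
 * idr_layer_prefix_mask(1) is ~0xffff.
 */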
static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}
/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/* try to allocate directly from kmem_cache */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
	}
	return new;
}
static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}
static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint && idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}
static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}
static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (!new)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}
/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int rv;

	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
	if (rv < 0)
		return rv == -ENOMEM ? -EAGAIN : rv;

	idr_fill_slot(idp, ptr, rv, pa);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
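
/*
 * Usage sketch (illustrative only, not from the original source): the old
 * interface is driven as a retry loop; my_idr, my_lock and ptr here are
 * hypothetical names.
 *
 *	int id, ret;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = idr_get_new_above(&my_idr, ptr, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 */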
/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming preload buffer from non-process context breaks preload
	 * allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);
/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
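
/*
 * Usage sketch (illustrative only): allocating an id for an object from
 * plain process context, where my_idr, struct my_obj and register_obj()
 * are hypothetical names.  -ENOMEM and -ENOSPC are simply propagated.
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	static int register_obj(struct my_obj *obj)
 *	{
 *		int id;
 *
 *		id = idr_alloc(&my_idr, obj, 1, 0, GFP_KERNEL);
 *		if (id < 0)
 *			return id;
 *		obj->id = id;
 *		return 0;
 *	}
 */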
static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}
static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && ! --((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}
/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have
		 * been preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);
void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idp_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);
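
/*
 * Clean-up sketch (illustrative only): the typical sequence described
 * above, freeing every stored object through an idr_for_each() callback
 * and then tearing the tree down; my_idr and free_one() are hypothetical.
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_destroy(&my_idr);
 */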
void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);
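
/*
 * Lookup sketch (illustrative only): idr_find(), which falls back to
 * idr_find_slowpath(), may be called under the RCU read lock as long as
 * the stored objects are freed in an RCU-safe way; my_idr and use() are
 * hypothetical.
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);
 *	rcu_read_unlock();
 */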
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
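
/*
 * Callback sketch (illustrative only): counting the currently registered
 * pointers; my_idr and count_fn() are hypothetical.  Returning a non-zero
 * value from the callback would stop the walk early and be returned to
 * the caller.
 *
 *	static int count_fn(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	idr_for_each(&my_idr, count_fn, &nr);
 */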
/**
 * idr_get_next - lookup the next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * greater than or equal to the given id.  After the lookup, *@nextidp is
 * updated for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
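
/*
 * Iteration sketch (illustrative only, similar in spirit to the
 * idr_for_each_entry() helper in <linux/idr.h>); my_idr and use() are
 * hypothetical.
 *
 *	struct my_obj *obj;
 *	int id = 0;
 *
 *	rcu_read_lock();
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(obj);
 *		id++;
 *	}
 *	rcu_read_unlock();
 */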
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
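
/*
 * Replacement sketch (illustrative only): swapping the pointer stored
 * under an existing id; my_idr and new_obj are hypothetical.  The return
 * value is either the old pointer or an ERR_PTR() encoding -EINVAL or
 * -ENOENT.
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */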
void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}
/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
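
/*
 * Allocation sketch (illustrative only): the ida_pre_get() /
 * ida_get_new_above() retry loop; my_ida and my_lock are hypothetical.
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 */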
/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:	ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);
/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max allocation)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
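
/*
 * Simple-interface sketch (illustrative only): the ida_simple_*() helpers
 * do their own locking and pre-allocation, so a plain call pair suffices
 * in process context; my_ida is hypothetical.
 *
 *	int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */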
/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);