/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * So here is what this bit of code does:
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer, or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocate, and so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 *
 * A word on reuse.  We reuse empty id slots as soon as we can, always
 * using the lowest one available.  But we also merge a counter into the
 * high bits of the id.  The counter is RESERVED_ID_BITS (8 at this time)
 * bits long.  This means that if you allocate and release the same id in
 * a loop we will reuse an id after about 256 times around the loop.  The
 * word "about" is used because we will NOT return a valid id of -1, so
 * if you loop on the largest possible id (and that is 24 bits, wow!) we
 * will kick the counter to avoid -1.  (Paranoid?  You bet!)
 *
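 * Illustratively (assuming the 8 counter bits and 24 id bits quoted
 * above, in a 32-bit int), a returned id is laid out roughly as:
 *
 *	bits 31..24: reuse counter    bits 23..0: position in the tree
 *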
 * What you need to do is, since we don't keep the counter as part of
 * the id / ptr pair, keep a copy of it in the pointed-to structure
 * (or elsewhere) so that when you ask for a ptr you can verify that
 * the returned ptr is correct by comparing the id it contains with the
 * one you asked for.  In other words, we only did half the reuse
 * protection.  Since the code depends on your code doing this check,
 * we ignore high order bits in the id, not just the count, but bits
 * that would, if used, index outside of the allocated ids.  In other
 * words, if the largest id currently allocated is 32, a lookup will
 * only look at the low 5 bits of the id.  Since you will want to keep
 * this id in the structure anyway (if for no other reason than to be
 * able to eliminate the id when the structure is found in some other
 * way) this seems reasonable.  If you really think otherwise, the code
 * to check these bits is here; it is just disabled with a #if 0.
 *
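 * A sketch of that check ("struct my_obj" and its "id" field are
 * hypothetical names, not part of this code):
 *
 *	struct my_obj {
 *		int id;		// copy of the value idr_get_new() returned
 *		...
 *	};
 *
 *	struct my_obj *p = idr_find(idp, id);
 *	if (p && p->id != id)
 *		p = NULL;	// counter bits differ: stale id, reject it
 *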
 * So here are the complete details:
 *
 *	#include <linux/idr.h>
 *
 * void idr_init(struct idr *idp)
 *
 *   This function is used to set up the handle (idp) that you will pass
 *   to the rest of the functions.  The structure is defined in the
 *   header.
 *
 * int idr_pre_get(struct idr *idp)
 *
 *   This function should be called prior to locking and calling the
 *   following function.  It preallocates enough memory to satisfy the
 *   worst possible allocation.  It can sleep, so it must not be called
 *   with any spinlocks held.  If the system is REALLY out of memory
 *   this function returns 0, otherwise 1.
 *
 * int idr_get_new(struct idr *idp, void *ptr);
 *
 *   This is the allocate id function.  It should be called with any
 *   required locks.  In fact, in the SMP case, you MUST lock prior to
 *   calling this function to avoid possible out of memory problems.  If
 *   memory is required, it will return -1, in which case you should
 *   unlock and go back to the idr_pre_get() call.  ptr is the pointer
 *   you want associated with the id.  In other words:
 *
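 *	// Sketch of the intended calling sequence; "my_lock" is
 *	// whatever lock you use to serialize these calls, it is
 *	// not part of this API.
 *	again:
 *		if (!idr_pre_get(idp))
 *			return -ENOMEM;		// really out of memory
 *		spin_lock(&my_lock);
 *		id = idr_get_new(idp, ptr);
 *		spin_unlock(&my_lock);
 *		if (id == -1)
 *			goto again;	// races used up the preallocation
 *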
 * void *idr_find(struct idr *idp, int id);
 *
 *   returns the "ptr", given the id.  A NULL return indicates that the
 *   id is not valid (or you passed NULL to idr_get_new(); shame on
 *   you).  This function must be called with a spinlock that prevents
 *   calling either idr_get_new() or idr_remove() or idr_find() while it
 *   is working.
 *
 * void idr_remove(struct idr *idp, int id);
 *
 *   removes the given id, freeing that slot and any memory that may
 *   now be unused.  See idr_find() for locking restrictions.
 */
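
/*
 * Putting it together, a minimal life cycle looks something like this
 * ("my_idr", "my_lock" and "my_ptr" are illustrative names only):
 *
 *	static struct idr my_idr;
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	idr_init(&my_idr);
 *	if (idr_pre_get(&my_idr)) {
 *		spin_lock(&my_lock);
 *		id = idr_get_new(&my_idr, my_ptr);
 *		spin_unlock(&my_lock);
 *	}
 *	...
 *	spin_lock(&my_lock);
 *	if (idr_find(&my_idr, id) == my_ptr)
 *		idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 */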

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/string.h>
#include <linux/idr.h>

static kmem_cache_t *idr_layer_cache;
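
/*
 * Pop a preallocated layer off the per-idr free list.  Callers rely on
 * idr_pre_get() having stocked the list, so running dry here is a bug.
 */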
static inline struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;

	spin_lock(&idp->lock);
	if (!(p = idp->id_free))
		BUG();
	idp->id_free = p->ary[0];
	idp->id_free_cnt--;
	p->ary[0] = NULL;
	spin_unlock(&idp->lock);
	return p;
}

static inline void free_layer(struct idr *idp, struct idr_layer *p)
{
	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock(&idp->lock);
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
	spin_unlock(&idp->lock);
}

int idr_pre_get(struct idr *idp)
{
	while (idp->id_free_cnt < idp->layers + 1) {
		struct idr_layer *new;

		new = kmem_cache_alloc(idr_layer_cache, GFP_KERNEL);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
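
/*
 * Plant "ptr" in the lowest free leaf slot below idp->top and return
 * the raw id (without the reuse counter), or -1 if the only candidate
 * slot would spill into the reserved counter bits.
 */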
static inline int sub_alloc(struct idr *idp, int shift, void *ptr)
{
	int n, v = 0;
	struct idr_layer *p;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];

	*paa = NULL;
	*++paa = &idp->top;

	/*
	 * By keeping each pointer in an array we can do the
	 * "after" recursion processing.  In this case, that means
	 * we can update the upper level bit map.
	 */
	while (1) {
		p = **paa;
		n = ffz(p->bitmap);
		if (shift) {
			/*
			 * We run around this while until we
			 * reach the leaf node...
			 */
			if (!p->ary[n]) {
				/*
				 * If no node, allocate one, AFTER
				 * we insure that we will not
				 * intrude on the reserved bit field.
				 */
				if ((n << shift) >= MAX_ID_BIT)
					return -1;
				p->ary[n] = alloc_layer(idp);
				p->count++;
			}
			*++paa = &p->ary[n];
			v += (n << shift);
			shift -= IDR_BITS;
		} else {
			/*
			 * We have reached the leaf node, plant the
			 * users pointer and return the raw id.
			 */
			p->ary[n] = (struct idr_layer *)ptr;
			__set_bit(n, &p->bitmap);
			v += n;
			p->count++;
			/*
			 * This is the post recursion processing.  Once
			 * we find a bitmap that is not full we are
			 * done.
			 */
			while (*(paa-1) && (**paa)->bitmap == IDR_FULL) {
				n = *paa - &(**(paa-1))->ary[0];
				__set_bit(n, &(**--paa)->bitmap);
			}
			return v;
		}
	}
}

int idr_get_new(struct idr *idp, void *ptr)
{
	int v;

	if (idp->id_free_cnt < idp->layers + 1)
		return -1;
	/*
	 * Add a new layer if the array is full.
	 */
	if (unlikely(!idp->top || idp->top->bitmap == IDR_FULL)) {
		/*
		 * This is a bit different than the lower layers because
		 * we have one branch already allocated and full.
		 */
		struct idr_layer *new = alloc_layer(idp);
		new->ary[0] = idp->top;
		if (idp->top)
			++new->count;
		idp->top = new;
		if (idp->layers++)
			__set_bit(0, &new->bitmap);
	}
	v = sub_alloc(idp, (idp->layers - 1) * IDR_BITS, ptr);
	if (likely(v >= 0)) {
		idp->count++;
		v += (idp->count << MAX_ID_SHIFT);
		if (unlikely(v == -1))
			v += (1L << MAX_ID_SHIFT);
	}
	return v;
}
EXPORT_SYMBOL(idr_get_new);
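
/*
 * Walk from the top layer down to the leaf that holds "id", clearing
 * the "full" bits on the way down, then free every layer whose use
 * count drops to zero.
 */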
static inline void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		int n = (id >> shift) & IDR_MASK;

		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	if (likely(p != NULL)) {
		int n = id & IDR_MASK;

		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	}
}

void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 &&
	    (idp->layers > 1) &&
	    idp->top->ary[0]) {
		// We can drop a layer
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;
#if 0
	/*
	 * This tests to see if bits outside the current tree are
	 * present.  If so, tain't one of ours!
	 */
	if (unlikely((id & ~(~0 << MAX_ID_SHIFT)) >> (n + IDR_BITS)))
		return NULL;
#endif
	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
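
/*
 * Slab constructor: each idr_layer handed out by the cache starts out
 * fully zeroed, matching what sub_remove()/free_layer() leave behind
 * when a layer is recycled through the free list.
 */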
static void idr_cache_ctor(void *idr_layer, kmem_cache_t *idr_layer_cache,
			   unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
					sizeof(struct idr_layer), 0, 0,
					idr_cache_ctor, NULL);
	return 0;
}

void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);