2 * Copyright (c) 2005-2012 The DragonFly Project.
3 * Copyright (c) 2013 François Tigeot
4 * Copyright (c) 2013 Matthew Dillon
7 * This code is derived from software contributed to The DragonFly Project
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42 * cc -I. -DUSERLAND_TEST libkern/linux_idr.c -o /tmp/idr -g
/*
 * NOTE(review): userland-test shims for the -DUSERLAND_TEST build (see the
 * "cc -I. -DUSERLAND_TEST ..." recipe in the header comment).  This paste
 * is line-mangled and missing lines; code tokens below are byte-identical.
 * The macros stub out kernel services so the allocator can be exercised as
 * a stand-alone program: lwkt token locking becomes a no-op, the kernel
 * allocator maps onto libc, and panic() maps to assert(0).
 */
46 #define _KERNEL_STRUCTURES
55 #include <sys/errno.h>
/* malloc-zone declaration is kernel-only; expands to nothing here */
58 #define MALLOC_DEFINE(a, b, c)
/* serializing token acquire/release: no-ops in the single-threaded test */
59 #define lwkt_gettoken(x)
60 #define lwkt_reltoken(x)
/* calloc zero-fills, which also covers callers passing M_ZERO */
61 #define kmalloc(bytes, zone, flags) calloc(bytes, 1)
62 #define lwkt_token_init(a, b)
63 #define lwkt_token_uninit(a)
64 #define kfree(ptr, flags) free(ptr)
/* NOTE(review): discards the format string/args; acceptable for a harness */
66 #define panic(str, ...) assert(0)
67 #define min(a, b) (((a) < (b)) ? (a) : (b))
68 #define max(a, b) (((a) > (b)) ? (a) : (b))
/*
 * Interactive driver for the USERLAND_TEST build.
 *
 * Commands, one per line on stdin:
 *   a <id>   allocate an id at or above <id>
 *   f <id>   free <id>
 *   g <id>   look up <id> and print the stored pointer
 *
 * NOTE(review): this extract is line-mangled and missing lines — the
 * declarations of buf/idr/error/id, the idr_init() call, the -EAGAIN
 * retry path around idr_get_new_above(), and several closing braces are
 * not visible here.  Code tokens below are byte-identical to the paste.
 */
71 main(int ac
, char **av
)
/* incrementing dummy value stored as the per-id data pointer */
75 intptr_t generation
= 0x10000000;
/* read commands until EOF */
83 while (fgets(buf
, sizeof(buf
), stdin
) != NULL
) {
/* 'a <id>': reserve backing store, then allocate at/above the request */
84 if (sscanf(buf
, "a %d", &id
) == 1) {
86 if (idr_pre_get(&idr
, 0) == 0) {
87 fprintf(stderr
, "pre_get failed\n");
90 error
= idr_get_new_above(&idr
,
/* (missing lines: data/sid arguments and the -EAGAIN retry handling) */
96 fprintf(stderr
, "get_new err %d\n",
100 printf("allocated %d value %08x\n",
101 id
, (int)generation
);
/* 'f <id>': release a previously allocated id */
105 } else if (sscanf(buf
, "f %d", &id
) == 1) {
106 idr_remove(&idr
, id
);
/* 'g <id>': NULL result means unallocated/out-of-range */
107 } else if (sscanf(buf
, "g %d", &id
) == 1) {
108 void *res
= idr_find(&idr
, id
);
109 printf("find %d res %p\n", id
, res
);
120 #include <sys/kernel.h>
121 #include <sys/libkern.h>
122 #include <sys/malloc.h>
123 #include <sys/param.h>
124 #include <sys/systm.h>
125 #include <sys/spinlock2.h>
126 #include <sys/limits.h>
/*
 * NOTE(review): tunables and forward declarations (line-mangled fragment;
 * code tokens kept byte-identical).  IDR_DEFAULT_SIZE must be 2^n - 1
 * because the free-id search treats idr_nodes[] as a complete binary tree.
 */
130 /* Must be 2^n - 1 */
131 #define IDR_DEFAULT_SIZE 255
/* malloc zone used for all idr node arrays */
133 MALLOC_DEFINE(M_IDR
, "idr", "Integer ID management");
/* grow idp->idr_nodes[] so it can hold at least id 'want' */
135 static void idr_grow(struct idr
*idp
, int want
);
/* propagate an allocation count change (incr is +1 or -1) up the tree */
136 static void idr_reserve(struct idr
*idp
, int id
, int incr
);
/* find the lowest free id >= want and < lim (presumably -1 when none;
 * the return lines are not visible in this paste — verify upstream) */
137 static int idr_find_free(struct idr
*idp
, int want
, int lim
);
/*
 * Number of nodes in right subtree, including the root.
 *
 * The idr free-id search treats idr_nodes[] as an implicit binary tree
 * of cumulative allocation counts (same scheme as the fd allocator in
 * kern_descrip.c); n ^ (n | (n + 1)) yields the size of the right
 * subtree rooted at n.
 *
 * NOTE(review): this definition was truncated in the paste (return-type
 * line and braces missing); reconstructed in the file's kernel style.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}
/*
 * Bigmost (closest) ancestor of n that has no right child; used to move
 * rightward/upward through the implicit allocation tree when the subtree
 * at n is full.
 *
 * NOTE(review): definition was truncated in the paste (return-type line
 * and braces missing); reconstructed in the file's kernel style.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}
/*
 * Smallest (closest) ancestor of n that has a right child.  The result
 * goes negative at the root, which terminates upward walks (see the
 * left_ancestor() loop in idr_reserve()).
 *
 * NOTE(review): only the return expression of this function survived in
 * the paste; the name is established by the idr_reserve() call site and
 * the body matches the fd-allocator helper family above it.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
/*
 * Post-free bookkeeping: pull the free-search hint (idr_freeindex) back
 * down to a freed id, and shrink idr_lastindex past trailing nodes that
 * hold no data.
 *
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * One line of the while-condition (original line 174) is missing from
 * this paste — it may carry an additional condition; verify upstream
 * before editing.
 */
167 idrfixup(struct idr
*idp
, int id
)
/* freed id below the hint: future searches may start lower */
169 if (id
< idp
->idr_freeindex
) {
170 idp
->idr_freeindex
= id
;
/* trim idr_lastindex while the highest node holds no data */
172 while (idp
->idr_lastindex
>= 0 &&
173 idp
->idr_nodes
[idp
->idr_lastindex
].data
== NULL
175 --idp
->idr_lastindex
;
179 static __inline
struct idr_node
*
180 idr_get_node(struct idr
*idp
, int id
)
182 struct idr_node
*idrnp
;
183 if (id
< 0 || id
>= idp
->idr_count
)
185 idrnp
= &idp
->idr_nodes
[id
];
186 if (idrnp
->allocated
== 0)
192 idr_reserve(struct idr
*idp
, int id
, int incr
)
195 idp
->idr_nodes
[id
].allocated
+= incr
;
196 KKASSERT(idp
->idr_nodes
[id
].allocated
>= 0);
197 id
= left_ancestor(id
);
/*
 * Search the cumulative-allocation-count binary tree for a free id at or
 * above (want) and below (lim).
 *
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the inner loop's 'node' computation (original
 * line ~237), the rsum reset, and every return statement — do not edit
 * this routine without the complete upstream text.
 */
202 idr_find_free(struct idr
*idp
, int want
, int lim
)
204 int id
, rsum
, rsize
, node
;
207 * Search for a free descriptor starting at the higher
208 * of want or fd_freefile. If that fails, consider
209 * expanding the ofile array.
211 * NOTE! the 'allocated' field is a cumulative recursive allocation
212 * count. If we happen to see a value of 0 then we can shortcut
213 * our search. Otherwise we run through through the tree going
214 * down branches we know have free descriptor(s) until we hit a
215 * leaf node. The leaf node will be free but will not necessarily
216 * have an allocated field of 0.
219 /* move up the tree looking for a subtree with a free node */
220 for (id
= max(want
, idp
->idr_freeindex
); id
< min(idp
->idr_count
, lim
);
221 id
= right_ancestor(id
)) {
/* allocated == 0 means the entire subtree rooted here is free */
222 if (idp
->idr_nodes
[id
].allocated
== 0)
225 rsize
= right_subtree_size(id
);
226 if (idp
->idr_nodes
[id
].allocated
== rsize
)
227 continue; /* right subtree full */
230 * Free fd is in the right subtree of the tree rooted at fd.
231 * Call that subtree R. Look for the smallest (leftmost)
232 * subtree of R with an unallocated fd: continue moving
233 * down the left branch until encountering a full left
234 * subtree, then move to the right.
/* descend: rsize halves at each level of the tree */
236 for (rsum
= 0, rsize
/= 2; rsize
> 0; rsize
/= 2) {
238 rsum
+= idp
->idr_nodes
[node
].allocated
;
239 if (idp
->idr_nodes
[id
].allocated
== rsum
+ rsize
) {
240 id
= node
; /* move to the right */
241 if (idp
->idr_nodes
[node
].allocated
== 0)
/*
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the declaration/initialization of 'lim' and
 * 'id', the for(;;) retry-loop framing around the grow/search sequence,
 * the break statements, and the final return of 'result'.
 */
252 * Blocking pre-get support, allows callers to use idr_pre_get() in
253 * combination with idr_get_new_above() such that idr_get_new_above()
254 * can be called safely with a spinlock held.
256 * Returns 0 on failure, 1 on success.
258 * Caller must hold a blockable lock.
261 idr_pre_get(struct idr
*idp
, __unused
unsigned gfp_mask
)
263 int want
= idp
->idr_maxwant
;
265 int result
= 1; /* success */
/* this routine can block (grow/allocate); assert no spinlocks held */
268 KKASSERT(mycpu
->gd_spinlocks
== 0);
269 lwkt_gettoken(&idp
->idr_token
);
272 * Grow if necessary (or if forced by the loop)
274 if (want
>= idp
->idr_count
)
278 * Check if a spot is available, break and return 0 if true,
279 * unless the available spot is beyond our limit. It is
280 * possible to exceed the limit due to the way array growth
283 * XXX we assume that the caller uses a consistent <sid> such
284 * that the idr_maxwant field is correct, otherwise we
285 * may believe that a slot is available but the caller then
286 * fails in idr_get_new_above() and loops.
288 id
= idr_find_free(idp
, idp
->idr_maxwant
, lim
);
291 result
= 0; /* failure */
296 * Return ENOSPC if our limit has been reached, otherwise
297 * loop and force growth.
299 if (idp
->idr_count
>= lim
) {
300 result
= 0; /* failure */
/* no free slot and still below the limit: force another grow pass */
303 want
= idp
->idr_count
;
305 lwkt_reltoken(&idp
->idr_token
);
/*
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the 'int resid;' declaration, the -EAGAIN
 * return statements after the early-out paths, the store of resid into
 * *id, and the final return.
 */
310 * Allocate an integer. If -EAGAIN is returned the caller should loop
311 * and call idr_pre_get() with no locks held, and then retry the call
312 * to idr_get_new_above().
314 * Can be safely called with spinlocks held.
317 idr_get_new_above(struct idr
*idp
, void *ptr
, int sid
, int *id
)
322 * NOTE! Because the idp is initialized with a non-zero count,
323 * sid might be < idp->idr_count but idr_maxwant might not
324 * yet be initialized. So check both cases.
326 lwkt_gettoken(&idp
->idr_token
);
/* array too small (or maxwant stale): record the need and bail so the
 * caller can idr_pre_get() without locks (presumably returns -EAGAIN
 * on a missing line) */
327 if (sid
>= idp
->idr_count
|| idp
->idr_maxwant
< sid
) {
328 idp
->idr_maxwant
= max(idp
->idr_maxwant
, sid
);
329 lwkt_reltoken(&idp
->idr_token
);
333 resid
= idr_find_free(idp
, sid
, INT_MAX
);
335 lwkt_reltoken(&idp
->idr_token
);
/* sanity: the found slot must lie within the current array */
339 if (resid
>= idp
->idr_count
)
340 panic("idr_get_new_above(): illegal resid %d", resid
);
/* maintain the last-used and free-search hints */
341 if (resid
> idp
->idr_lastindex
)
342 idp
->idr_lastindex
= resid
;
343 if (sid
<= idp
->idr_freeindex
)
344 idp
->idr_freeindex
= resid
;
/* commit: bump cumulative counts up the tree, then publish the data */
346 idr_reserve(idp
, resid
, 1);
347 idp
->idr_nodes
[resid
].data
= ptr
;
349 lwkt_reltoken(&idp
->idr_token
);
/*
 * Linux-style idr_alloc(): allocate an id in [start, end).
 *
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the declarations of 'want' and 'id', the
 * grow/retry loop framing, the error-return paths, and the final return
 * of the allocated id.
 */
354 * start: minimum id, inclusive
355 * end: maximum id, exclusive or INT_MAX if end is negative
358 idr_alloc(struct idr
*idp
, void *ptr
, int start
, int end
, unsigned gfp_mask
)
/* end is exclusive; lim is the highest usable id */
360 int lim
= end
> 0 ? end
- 1 : INT_MAX
;
370 lwkt_gettoken(&idp
->idr_token
);
/* 'want' is declared/initialized on a missing line — verify upstream */
373 if (want
>= idp
->idr_count
)
377 * Check if a spot is available, break and return 0 if true,
378 * unless the available spot is beyond our limit. It is
379 * possible to exceed the limit due to the way array growth
382 id
= idr_find_free(idp
, start
, lim
);
/* no slot found: force growth to the current count and retry */
384 want
= idp
->idr_count
;
393 if (id
>= idp
->idr_count
)
394 panic("idr_alloc(): illegal resid %d", id
);
/* maintain the last-used and free-search hints */
395 if (id
> idp
->idr_lastindex
)
396 idp
->idr_lastindex
= id
;
397 if (start
<= idp
->idr_freeindex
)
398 idp
->idr_freeindex
= id
;
/* commit the allocation and publish the caller's pointer */
400 idr_reserve(idp
, id
, 1);
401 idp
->idr_nodes
[id
].data
= ptr
;
404 lwkt_reltoken(&idp
->idr_token
);
/*
 * Allocate any free integer id (legacy Linux API): convenience wrapper
 * that delegates to idr_get_new_above() with a starting id of 0.  Same
 * -EAGAIN contract as idr_get_new_above().
 *
 * NOTE(review): reconstructed from a truncated paste — only the return
 * type line and braces were missing.
 */
int
idr_get_new(struct idr *idp, void *ptr, int *id)
{
	return idr_get_new_above(idp, ptr, 0, id);
}
/*
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the declaration/initialization of 'nf', the
 * body of the do/while that computes the next 2^n - 1 size, the early
 * return in the lost-race path, and the zeroing/bookkeeping tail.
 */
415 * Grow the file table so it can hold through descriptor (want).
417 * Caller must hold a blockable lock.
420 idr_grow(struct idr
*idp
, int want
)
422 struct idr_node
*oldnodes
, *newnodes
;
425 /* We want 2^n - 1 descriptors */
/* (do/while body computing nf is on missing lines) */
429 } while (nf
<= want
);
/* debug trace of the resize — printf maps to kprintf in-kernel via the
 * USERLAND_TEST shims; presumably guarded upstream — verify */
432 printf("idr_grow: %d -> %d\n", idp
->idr_count
, nf
);
435 /* Allocate a new zero'ed node array */
436 newnodes
= kmalloc(nf
* sizeof(struct idr_node
),
437 M_IDR
, M_ZERO
| M_WAITOK
);
439 /* We might race another grow */
440 if (nf
<= idp
->idr_count
) {
/* lost the race: another thread already grew the array */
441 kfree(newnodes
, M_IDR
);
446 * Copy existing nodes to the beginning of the new array
448 oldnodes
= idp
->idr_nodes
;
450 bcopy(oldnodes
, newnodes
,
451 idp
->idr_count
* sizeof(struct idr_node
));
453 idp
->idr_nodes
= newnodes
;
457 kfree(oldnodes
, M_IDR
);
/*
 * Free an allocated id: clear its data pointer and drop the cumulative
 * allocation counts up the tree.
 *
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the declaration of 'ptr', the returns after
 * the two early-out paths, and (presumably) an idrfixup() call before
 * the final token release — verify upstream.
 */
463 idr_remove(struct idr
*idp
, int id
)
467 lwkt_gettoken(&idp
->idr_token
);
/* out-of-range id: nothing to do */
468 if (id
< 0 || id
>= idp
->idr_count
) {
469 lwkt_reltoken(&idp
->idr_token
);
/* not currently allocated: nothing to do */
472 if ((ptr
= idp
->idr_nodes
[id
].data
) == NULL
) {
473 lwkt_reltoken(&idp
->idr_token
);
/* clear the slot, then decrement counts along the ancestor path */
476 idp
->idr_nodes
[id
].data
= NULL
;
477 idr_reserve(idp
, id
, -1);
479 lwkt_reltoken(&idp
->idr_token
);
483 * Remove all int allocations, leave array intact.
485 * Caller must hold a blockable lock (or be in a context where holding
486 * the spinlock is not relevant).
489 idr_remove_all(struct idr
*idp
)
491 lwkt_gettoken(&idp
->idr_token
);
492 bzero(idp
->idr_nodes
, idp
->idr_count
* sizeof(struct idr_node
));
493 idp
->idr_lastindex
= -1;
494 idp
->idr_freeindex
= 0;
495 idp
->idr_nexpands
= 0;
496 idp
->idr_maxwant
= 0;
497 lwkt_reltoken(&idp
->idr_token
);
501 idr_destroy(struct idr
*idp
)
503 lwkt_token_uninit(&idp
->idr_token
);
504 if (idp
->idr_nodes
) {
505 kfree(idp
->idr_nodes
, M_IDR
);
506 idp
->idr_nodes
= NULL
;
508 bzero(idp
, sizeof(*idp
));
512 idr_find(struct idr
*idp
, int id
)
516 if (id
< 0 || id
>= idp
->idr_count
) {
518 } else if (idp
->idr_nodes
[id
].allocated
== 0) {
521 ret
= idp
->idr_nodes
[id
].data
;
/*
 * Invoke fn(id, data_ptr, data) for every allocated id.
 *
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the trailing 'void *data' parameter line, the
 * declarations of 'i' and 'error', the early-break on a non-zero
 * callback return, and the function's own return — verify upstream.
 */
527 idr_for_each(struct idr
*idp
, int (*fn
)(int id
, void *p
, void *data
),
531 struct idr_node
*nodes
;
533 nodes
= idp
->idr_nodes
;
/* linear scan of the whole node array */
534 for (i
= 0; i
< idp
->idr_count
; i
++) {
/* only visit live entries: data present and allocated count positive */
535 if (nodes
[i
].data
!= NULL
&& nodes
[i
].allocated
> 0) {
536 error
= fn(i
, nodes
[i
].data
, data
);
/*
 * Replace the data pointer stored under an already-allocated (id).
 *
 * NOTE(review): line-mangled fragment, code tokens kept byte-identical.
 * Missing from this paste: the declaration of the saved old pointer, the
 * error path inside the NULL check, the actual store of the new pointer,
 * and the return value (presumably the previous data pointer or an
 * error) — verify upstream before relying on the contract.
 */
545 idr_replace(struct idr
*idp
, void *ptr
, int id
)
547 struct idr_node
*idrnp
;
550 lwkt_gettoken(&idp
->idr_token
);
/* idr_get_node() returns NULL for out-of-range or unallocated ids */
551 idrnp
= idr_get_node(idp
, id
);
552 if (idrnp
== NULL
|| ptr
== NULL
) {
558 lwkt_reltoken(&idp
->idr_token
);
/*
 * Initialize an idr: zero the structure, allocate the initial
 * IDR_DEFAULT_SIZE node array (zeroed), and set the search hints to
 * their empty-table values.
 *
 * NOTE(review): this definition runs past the end of the visible
 * extract (its closing brace is not shown) and the paste is
 * line-mangled; code tokens kept byte-identical.
 */
563 idr_init(struct idr
*idp
)
565 bzero(idp
, sizeof(struct idr
));
/* M_ZERO gives allocated == 0 and data == NULL for every node */
566 idp
->idr_nodes
= kmalloc(IDR_DEFAULT_SIZE
* sizeof(struct idr_node
),
567 M_IDR
, M_WAITOK
| M_ZERO
);
568 idp
->idr_count
= IDR_DEFAULT_SIZE
;
/* -1 marks "no ids in use yet" */
569 idp
->idr_lastindex
= -1;
570 idp
->idr_maxwant
= 0;
571 lwkt_token_init(&idp
->idr_token
, "idr token");