sys/libkern/linux_idr.c
/*
 * Copyright (c) 2005-2012 The DragonFly Project.
 * Copyright (c) 2013 François Tigeot
 * Copyright (c) 2013 Matthew Dillon
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifdef USERLAND_TEST
/*
 * Testing:
 *
 * cc -I. -DUSERLAND_TEST libkern/linux_idr.c -o /tmp/idr -g
 */
#define _KERNEL
#define _KERNEL_STRUCTURES
#define KLD_MODULE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <sys/idr.h>
#include <sys/errno.h>
#undef MALLOC_DEFINE
#define MALLOC_DEFINE(a, b, c)
#define lwkt_gettoken(x)
#define lwkt_reltoken(x)
#undef kmalloc
#define kmalloc(bytes, zone, flags)	calloc(bytes, 1)
#define lwkt_token_init(a, b)
#define lwkt_token_uninit(a)
#define kfree(ptr, flags)		free(ptr)
#define KKASSERT(a)
#define panic(str, ...)			assert(0)
#define min(a, b)	(((a) < (b)) ? (a) : (b))
#define max(a, b)	(((a) > (b)) ? (a) : (b))
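
/*
 * The stubs above let this kernel source build as a normal userland
 * program: LWKT token calls become no-ops, kmalloc()/kfree() map to
 * calloc()/free(), KKASSERT() compiles away and panic() aborts via
 * assert(0).
 */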
int
main(int ac, char **av)
{
	char buf[256];
	struct idr idr;
	intptr_t generation = 0x0;
	int error;
	int id;

	idr_init(&idr);

	printf("cmd> ");
	fflush(stdout);
	while (fgets(buf, sizeof(buf), stdin) != NULL) {
		if (sscanf(buf, "a %d", &id) == 1) {
			for (;;) {
				if (idr_pre_get(&idr, 0) == 0) {
					fprintf(stderr, "pre_get failed\n");
					exit(1);
				}
				error = idr_get_new_above(&idr,
							  (void *)generation,
							  id, &id);
				if (error == -EAGAIN)
					continue;
				if (error) {
					fprintf(stderr, "get_new err %d\n",
						error);
					exit(1);
				}
				printf("allocated %d value %08x\n",
				       id, (int)generation);
				++generation;
				break;
			}
		} else if (sscanf(buf, "f %d", &id) == 1) {
			idr_remove(&idr, id);
		} else if (sscanf(buf, "g %d", &id) == 1) {
			void *res = idr_find(&idr, id);
			printf("find %d res %p\n", id, res);
		}
		printf("cmd> ");
		fflush(stdout);
	}
	return 0;
}
#else

#include <sys/idr.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/spinlock2.h>
#include <sys/limits.h>

#endif
/* Must be 2^n - 1 */
#define IDR_DEFAULT_SIZE	255

MALLOC_DEFINE(M_IDR, "idr", "Integer ID management");

static void	idr_grow(struct idr *idp, int want);
static void	idr_reserve(struct idr *idp, int id, int incr);
static int	idr_find_free(struct idr *idp, int want, int lim);
/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
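
/*
 * Worked examples of the bit tricks above (illustrative only), in binary:
 *
 *	right_subtree_size(5) = 101 ^ (101 | 110) = 101 ^ 111 = 010 = 2
 *	right_ancestor(2)     = 010 | 011 = 011 = 3
 *	left_ancestor(5)      = (101 & 110) - 1 = 100 - 1 = 011 = 3
 *
 * left_ancestor() eventually yields -1 (e.g. left_ancestor(3) =
 * (011 & 100) - 1 = -1), which is what terminates the upward walk
 * in idr_reserve() below.
 */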
static __inline void
idrfixup(struct idr *idp, int id)
{
	if (id < idp->idr_freeindex) {
		idp->idr_freeindex = id;
	}
	while (idp->idr_lastindex >= 0 &&
		idp->idr_nodes[idp->idr_lastindex].data == NULL
	) {
		--idp->idr_lastindex;
	}
}
static __inline struct idr_node *
idr_get_node(struct idr *idp, int id)
{
	struct idr_node *idrnp;

	if (id < 0 || id >= idp->idr_count)
		return (NULL);
	idrnp = &idp->idr_nodes[id];
	if (idrnp->allocated == 0)
		return (NULL);
	return (idrnp);
}
static void
idr_reserve(struct idr *idp, int id, int incr)
{
	while (id >= 0) {
		idp->idr_nodes[id].allocated += incr;
		KKASSERT(idp->idr_nodes[id].allocated >= 0);
		id = left_ancestor(id);
	}
}
static int
idr_find_free(struct idr *idp, int want, int lim)
{
	int id, rsum, rsize, node;

	/*
	 * Search for a free id starting at the higher of want or
	 * idr_freeindex.  If that fails, consider expanding the
	 * node array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going
	 * down branches we know have free descriptor(s) until we hit a
	 * leaf node.  The leaf node will be free but will not necessarily
	 * have an allocated field of 0.
	 */

	/* move up the tree looking for a subtree with a free node */
	for (id = max(want, idp->idr_freeindex); id < min(idp->idr_count, lim);
	     id = right_ancestor(id)) {
		if (idp->idr_nodes[id].allocated == 0)
			return (id);

		rsize = right_subtree_size(id);
		if (idp->idr_nodes[id].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * A free id is in the right subtree of the tree rooted
		 * at id.  Call that subtree R.  Look for the smallest
		 * (leftmost) subtree of R with an unallocated id:
		 * continue moving down the left branch until encountering
		 * a full left subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = id + rsize;
			rsum += idp->idr_nodes[node].allocated;
			if (idp->idr_nodes[id].allocated == rsum + rsize) {
				id = node;	/* move to the right */
				if (idp->idr_nodes[node].allocated == 0)
					return (id);
				rsum = 0;
			}
		}
		return (id);
	}
	return (-1);
}
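
/*
 * Illustration (not from the original source): reserving id 6 runs
 * idr_reserve() up the chain 6 -> 5 -> 3, since left_ancestor(6) == 5,
 * left_ancestor(5) == 3 and left_ancestor(3) == -1.  Node 3's
 * 'allocated' field therefore summarizes ids 3..6, which matches
 * right_subtree_size(3) == 4, and idr_find_free() can skip that whole
 * subtree when the count equals the subtree size.
 */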
/*
 * Blocking pre-get support, allows callers to use idr_pre_get() in
 * combination with idr_get_new_above() such that idr_get_new_above()
 * can be called safely with a spinlock held.
 *
 * Returns 0 on failure, 1 on success.
 *
 * Caller must hold a blockable lock.
 */
int
idr_pre_get(struct idr *idp, __unused unsigned gfp_mask)
{
	int want = idp->idr_maxwant;
	int lim = INT_MAX;
	int result = 1;		/* success */
	int id;

	KKASSERT(mycpu->gd_spinlocks == 0);
	lwkt_gettoken(&idp->idr_token);
	for (;;) {
		/*
		 * Grow if necessary (or if forced by the loop)
		 */
		if (want >= idp->idr_count)
			idr_grow(idp, want);

		/*
		 * Check if a spot is available and break out (success)
		 * if so, unless the available spot is beyond our limit.
		 * It is possible to exceed the limit due to the way
		 * array growth works.
		 *
		 * XXX we assume that the caller uses a consistent <sid> such
		 *     that the idr_maxwant field is correct, otherwise we
		 *     may believe that a slot is available but the caller then
		 *     fails in idr_get_new_above() and loops.
		 */
		id = idr_find_free(idp, idp->idr_maxwant, lim);
		if (id != -1) {
			if (id >= lim)
				result = 0;	/* failure */
			break;
		}

		/*
		 * Fail if our limit has been reached, otherwise loop
		 * and force growth.
		 */
		if (idp->idr_count >= lim) {
			result = 0;	/* failure */
			break;
		}
		want = idp->idr_count;
	}
	lwkt_reltoken(&idp->idr_token);
	return result;
}
/*
 * Allocate an integer.  If -EAGAIN is returned the caller should loop
 * and call idr_pre_get() with no locks held, and then retry the call
 * to idr_get_new_above().
 *
 * Can be safely called with spinlocks held.
 */
int
idr_get_new_above(struct idr *idp, void *ptr, int sid, int *id)
{
	int resid;

	/*
	 * NOTE! Because the idp is initialized with a non-zero count,
	 *	 sid might be < idp->idr_count but idr_maxwant might not
	 *	 yet be initialized.  So check both cases.
	 */
	lwkt_gettoken(&idp->idr_token);
	if (sid >= idp->idr_count || idp->idr_maxwant < sid) {
		idp->idr_maxwant = max(idp->idr_maxwant, sid);
		lwkt_reltoken(&idp->idr_token);
		return -EAGAIN;
	}

	resid = idr_find_free(idp, sid, INT_MAX);
	if (resid == -1) {
		lwkt_reltoken(&idp->idr_token);
		return -EAGAIN;
	}

	if (resid >= idp->idr_count)
		panic("idr_get_new_above(): illegal resid %d", resid);
	if (resid > idp->idr_lastindex)
		idp->idr_lastindex = resid;
	if (sid <= idp->idr_freeindex)
		idp->idr_freeindex = resid;
	*id = resid;
	idr_reserve(idp, resid, 1);
	idp->idr_nodes[resid].data = ptr;

	lwkt_reltoken(&idp->idr_token);
	return (0);
}
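
/*
 * Typical calling pattern (a sketch mirroring the USERLAND_TEST
 * harness above): pre-allocate storage with no locks held, then
 * retry the allocation while -EAGAIN is returned.
 *
 *	for (;;) {
 *		if (idr_pre_get(idp, 0) == 0)
 *			break;		(allocation limit / no memory)
 *		error = idr_get_new_above(idp, ptr, sid, &id);
 *		if (error != -EAGAIN)
 *			break;
 *	}
 */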
/*
 * start: minimum id, inclusive
 * end:   maximum id, exclusive; if end is zero or negative there is
 *        no upper bound (INT_MAX)
 */
int
idr_alloc(struct idr *idp, void *ptr, int start, int end, unsigned gfp_mask)
{
	int lim = end > 0 ? end - 1 : INT_MAX;
	int want = start;
	int result, id;

	if (start < 0)
		return -EINVAL;

	if (lim < start)
		return -ENOSPC;

	lwkt_gettoken(&idp->idr_token);

grow_again:
	if (want >= idp->idr_count)
		idr_grow(idp, want);

	/*
	 * Check if a spot is available.  It is possible to exceed the
	 * limit due to the way array growth works, so verify the result
	 * against lim below.
	 */
	id = idr_find_free(idp, start, lim);
	if (id == -1) {
		want = idp->idr_count;
		goto grow_again;
	}

	if (id >= lim) {
		result = -ENOSPC;
		goto done;
	}

	if (id >= idp->idr_count)
		panic("idr_alloc(): illegal resid %d", id);
	if (id > idp->idr_lastindex)
		idp->idr_lastindex = id;
	if (start <= idp->idr_freeindex)
		idp->idr_freeindex = id;
	result = id;
	idr_reserve(idp, id, 1);
	idp->idr_nodes[id].data = ptr;

done:
	lwkt_reltoken(&idp->idr_token);
	return result;
}
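
/*
 * Example calls (illustrative): idr_alloc(idp, ptr, 0, 0, 0) returns
 * the lowest free id >= 0 with no upper bound, while
 * idr_alloc(idp, ptr, 1, 100, 0) restricts the result to [1, 99].
 */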
int
idr_get_new(struct idr *idp, void *ptr, int *id)
{
	return idr_get_new_above(idp, ptr, 0, id);
}
/*
 * Grow the node array so it can hold the id (want).
 *
 * Caller must hold a blockable lock.
 */
static void
idr_grow(struct idr *idp, int want)
{
	struct idr_node *oldnodes, *newnodes;
	int nf;

	/* We want 2^n - 1 descriptors */
	nf = idp->idr_count;
	do {
		nf = 2 * nf + 1;
	} while (nf <= want);

#ifdef USERLAND_TEST
	printf("idr_grow: %d -> %d\n", idp->idr_count, nf);
#endif

	/* Allocate a new zero'ed node array */
	newnodes = kmalloc(nf * sizeof(struct idr_node),
			   M_IDR, M_ZERO | M_WAITOK);

	/* We might race another grow */
	if (nf <= idp->idr_count) {
		kfree(newnodes, M_IDR);
		return;
	}

	/*
	 * Copy existing nodes to the beginning of the new array
	 */
	oldnodes = idp->idr_nodes;
	if (oldnodes) {
		bcopy(oldnodes, newnodes,
		      idp->idr_count * sizeof(struct idr_node));
	}
	idp->idr_nodes = newnodes;
	idp->idr_count = nf;

	if (oldnodes) {
		kfree(oldnodes, M_IDR);
	}
	idp->idr_nexpands++;
}
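
/*
 * Size progression note: nf = 2 * nf + 1 preserves the 2^n - 1
 * invariant, so starting from IDR_DEFAULT_SIZE the array grows
 * 255 -> 511 -> 1023 -> 2047 -> ...
 */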
void *
idr_remove(struct idr *idp, int id)
{
	void *ptr;

	lwkt_gettoken(&idp->idr_token);
	if (id < 0 || id >= idp->idr_count) {
		lwkt_reltoken(&idp->idr_token);
		return NULL;
	}
	if (idp->idr_nodes[id].allocated == 0) {
		lwkt_reltoken(&idp->idr_token);
		return NULL;
	}
	ptr = idp->idr_nodes[id].data;
	idp->idr_nodes[id].data = NULL;
	idr_reserve(idp, id, -1);
	idrfixup(idp, id);
	lwkt_reltoken(&idp->idr_token);

	return ptr;
}
/*
 * Remove all int allocations, leave array intact.
 *
 * Caller must hold a blockable lock (or be in a context where holding
 * the spinlock is not relevant).
 */
void
idr_remove_all(struct idr *idp)
{
	lwkt_gettoken(&idp->idr_token);
	bzero(idp->idr_nodes, idp->idr_count * sizeof(struct idr_node));
	idp->idr_lastindex = -1;
	idp->idr_freeindex = 0;
	idp->idr_nexpands = 0;
	idp->idr_maxwant = 0;
	lwkt_reltoken(&idp->idr_token);
}
void
idr_destroy(struct idr *idp)
{
	lwkt_token_uninit(&idp->idr_token);
	if (idp->idr_nodes) {
		kfree(idp->idr_nodes, M_IDR);
		idp->idr_nodes = NULL;
	}
	bzero(idp, sizeof(*idp));
}
void *
idr_find(struct idr *idp, int id)
{
	void *ret;

	if (id < 0 || id >= idp->idr_count) {
		ret = NULL;
	} else if (idp->idr_nodes[id].allocated == 0) {
		ret = NULL;
	} else {
		ret = idp->idr_nodes[id].data;
	}
	return ret;
}
int
idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data),
	     void *data)
{
	int i, error = 0;
	struct idr_node *nodes;

	nodes = idp->idr_nodes;
	for (i = 0; i < idp->idr_count; i++) {
		if (nodes[i].data != NULL && nodes[i].allocated > 0) {
			error = fn(i, nodes[i].data, data);
			if (error != 0)
				break;
		}
	}
	return error;
}
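
/*
 * Example callback (hypothetical): stop the walk at the first entry
 * whose pointer matches an opaque cookie; a non-zero return from fn
 * stops the iteration and is passed back to the caller.
 *
 *	static int
 *	match_cb(int id, void *p, void *data)
 *	{
 *		return (p == data);
 *	}
 *
 *	error = idr_for_each(idp, match_cb, needle);
 */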
void *
idr_replace(struct idr *idp, void *ptr, int id)
{
	struct idr_node *idrnp;
	void *ret;

	lwkt_gettoken(&idp->idr_token);
	idrnp = idr_get_node(idp, id);
	if (idrnp == NULL) {
		ret = NULL;
	} else {
		ret = idrnp->data;
		idrnp->data = ptr;
	}
	lwkt_reltoken(&idp->idr_token);

	return (ret);
}
void
idr_init(struct idr *idp)
{
	bzero(idp, sizeof(struct idr));
	idp->idr_nodes = kmalloc(IDR_DEFAULT_SIZE * sizeof(struct idr_node),
				 M_IDR, M_WAITOK | M_ZERO);
	idp->idr_count = IDR_DEFAULT_SIZE;
	idp->idr_lastindex = -1;
	idp->idr_maxwant = 0;
	lwkt_token_init(&idp->idr_token, "idr token");
}
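
/*
 * Lifecycle sketch (illustrative): an idr is caller-owned storage,
 * initialized before use and torn down with idr_destroy().
 *
 *	struct idr idr;
 *
 *	idr_init(&idr);
 *	id = idr_alloc(&idr, ptr, 0, 0, 0);
 *	...
 *	idr_remove(&idr, id);
 *	idr_destroy(&idr);
 */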