/*
 * Copyright (c) 2005-2012 The DragonFly Project.
 * Copyright (c) 2013 François Tigeot
 * Copyright (c) 2013 Matthew Dillon
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef USERLAND_TEST
/*
 * Testing:
 *
 *	cc -I. -DUSERLAND_TEST libkern/linux_idr.c -o /tmp/idr -g
 */

#define _KERNEL
#define _KERNEL_STRUCTURES
#define KLD_MODULE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <sys/idr.h>
#include <sys/errno.h>

#undef MALLOC_DEFINE
#define MALLOC_DEFINE(a, b, c)
#define lwkt_gettoken(x)
#define lwkt_reltoken(x)
#define kmalloc(bytes, zone, flags)	calloc(bytes, 1)
#define lwkt_token_init(a, b)
#define lwkt_token_uninit(a)
#define kfree(ptr, flags)		free(ptr)
#define KKASSERT(a)
#define panic(str, ...)			assert(0)
#define min(a, b)	(((a) < (b)) ? (a) : (b))
#define max(a, b)	(((a) > (b)) ? (a) : (b))

int
main(int ac, char **av)
{
	char buf[256];
	struct idr idr;
	intptr_t generation = 0x10000000;
	int error;
	int id;

	idr_init(&idr);

	printf("cmd> ");
	fflush(stdout);
	while (fgets(buf, sizeof(buf), stdin) != NULL) {
		if (sscanf(buf, "a %d", &id) == 1) {
			for (;;) {
				if (idr_pre_get(&idr, 0) == 0) {
					fprintf(stderr, "pre_get failed\n");
					exit(1);
				}
				error = idr_get_new_above(&idr,
							  (void *)generation,
							  id, &id);
				if (error == -EAGAIN)
					continue;
				if (error) {
					fprintf(stderr, "get_new err %d\n",
						error);
					exit(1);
				}
				printf("allocated %d value %08x\n",
				       id, (int)generation);
				++generation;
				break;
			}
		} else if (sscanf(buf, "f %d", &id) == 1) {
			idr_remove(&idr, id);
		} else if (sscanf(buf, "g %d", &id) == 1) {
			void *res = idr_find(&idr, id);
			printf("find %d res %p\n", id, res);
		}
		printf("cmd> ");
		fflush(stdout);
	}
	return 0;
}

#else

#include <sys/idr.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/spinlock2.h>
#include <sys/limits.h>

#endif

/* Must be 2^n - 1 */
#define IDR_DEFAULT_SIZE	255

MALLOC_DEFINE(M_IDR, "idr", "Integer ID management");

static void	idr_grow(struct idr *idp, int want);
static void	idr_reserve(struct idr *idp, int id, int incr);
static int	idr_find_free(struct idr *idp, int want, int lim);

/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
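
/*
 * These three helpers treat the flat node array as an implicit binary
 * summation tree.  As a concrete illustration, for n = 5 (binary 101):
 *
 *	right_subtree_size(5) = 5 ^ (5 | 6) = 5 ^ 7 = 2
 *	right_ancestor(5)     = 5 | 6       = 7
 *	left_ancestor(5)      = (5 & 6) - 1 = 3
 */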

static __inline void
idrfixup(struct idr *idp, int id)
{
	if (id < idp->idr_freeindex) {
		idp->idr_freeindex = id;
	}
	while (idp->idr_lastindex >= 0 &&
	    idp->idr_nodes[idp->idr_lastindex].data == NULL) {
		--idp->idr_lastindex;
	}
}

static __inline struct idr_node *
idr_get_node(struct idr *idp, int id)
{
	struct idr_node *idrnp;

	if (id < 0 || id >= idp->idr_count)
		return (NULL);
	idrnp = &idp->idr_nodes[id];
	if (idrnp->allocated == 0)
		return (NULL);
	return (idrnp);
}

static void
idr_reserve(struct idr *idp, int id, int incr)
{
	while (id >= 0) {
		idp->idr_nodes[id].allocated += incr;
		KKASSERT(idp->idr_nodes[id].allocated >= 0);
		id = left_ancestor(id);
	}
}
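
/*
 * The 'allocated' counters thus accumulate up the left-ancestor chain.
 * For example, idr_reserve(idp, 5, 1) increments nodes 5 and 3 and then
 * stops, since left_ancestor(5) == 3 and left_ancestor(3) == -1.
 */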

static int
idr_find_free(struct idr *idp, int want, int lim)
{
	int id, rsum, rsize, node;

	/*
	 * Search for a free id starting at the higher of want or
	 * idr_freeindex.  If that fails, consider expanding the
	 * node array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 *	 count.  If we happen to see a value of 0 then we can
	 *	 shortcut our search.  Otherwise we run through the tree
	 *	 going down branches we know have free id(s) until we hit
	 *	 a leaf node.  The leaf node will be free but will not
	 *	 necessarily have an allocated field of 0.
	 */

	/* move up the tree looking for a subtree with a free node */
	for (id = max(want, idp->idr_freeindex); id < min(idp->idr_count, lim);
	     id = right_ancestor(id)) {
		if (idp->idr_nodes[id].allocated == 0)
			return (id);

		rsize = right_subtree_size(id);
		if (idp->idr_nodes[id].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * A free id is in the right subtree of the tree rooted
		 * at id.  Call that subtree R.  Look for the smallest
		 * (leftmost) subtree of R with an unallocated id:
		 * continue moving down the left branch until encountering
		 * a full left subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = id + rsize;
			rsum += idp->idr_nodes[node].allocated;
			if (idp->idr_nodes[id].allocated == rsum + rsize) {
				id = node;	/* move to the right */
				if (idp->idr_nodes[node].allocated == 0)
					return (id);
				rsum = 0;
			}
		}
		return (id);
	}
	return (-1);
}

/*
 * Blocking pre-get support, allows callers to use idr_pre_get() in
 * combination with idr_get_new_above() such that idr_get_new_above()
 * can be called safely with a spinlock held.
 *
 * Returns 0 on failure, 1 on success.
 *
 * Caller must hold a blockable lock.
 */
int
idr_pre_get(struct idr *idp, __unused unsigned gfp_mask)
{
	int want = idp->idr_maxwant;
	int lim = INT_MAX;
	int result = 1;				/* success */
	int id;

	KKASSERT(mycpu->gd_spinlocks == 0);
	lwkt_gettoken(&idp->idr_token);
	for (;;) {
		/*
		 * Grow if necessary (or if forced by the loop)
		 */
		if (want >= idp->idr_count)
			idr_grow(idp, want);

		/*
		 * Check if a spot is available, break and return success
		 * if true, unless the available spot is beyond our limit.
		 * It is possible to exceed the limit due to the way array
		 * growth works.
		 *
		 * XXX we assume that the caller uses a consistent <sid> such
		 *     that the idr_maxwant field is correct, otherwise we
		 *     may believe that a slot is available but the caller
		 *     then fails in idr_get_new_above() and loops.
		 */
		id = idr_find_free(idp, idp->idr_maxwant, lim);
		if (id != -1) {
			if (id >= lim)
				result = 0;	/* failure */
			break;
		}

		/*
		 * Return failure if our limit has been reached, otherwise
		 * loop and force growth.
		 */
		if (idp->idr_count >= lim) {
			result = 0;		/* failure */
			break;
		}
		want = idp->idr_count;
	}
	lwkt_reltoken(&idp->idr_token);
	return result;
}
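
/*
 * A typical caller pairs idr_pre_get() with idr_get_new_above() in a
 * retry loop, along these lines (a sketch only, following the userland
 * test above; 'lk', 'ptr', and 'start' are the caller's own):
 *
 *	for (;;) {
 *		if (idr_pre_get(&idr, 0) == 0)
 *			return (ENOMEM);
 *		spin_lock(&lk);
 *		error = idr_get_new_above(&idr, ptr, start, &id);
 *		spin_unlock(&lk);
 *		if (error != -EAGAIN)
 *			break;
 *	}
 */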

/*
 * Allocate an integer.  If -EAGAIN is returned the caller should loop
 * and call idr_pre_get() with no locks held, and then retry the call
 * to idr_get_new_above().
 *
 * Can be safely called with spinlocks held.
 */
int
idr_get_new_above(struct idr *idp, void *ptr, int sid, int *id)
{
	int resid;

	/*
	 * NOTE! Because the idp is initialized with a non-zero count,
	 *	 sid might be < idp->idr_count but idr_maxwant might not
	 *	 yet be initialized.  So check both cases.
	 */
	lwkt_gettoken(&idp->idr_token);
	if (sid >= idp->idr_count || idp->idr_maxwant < sid) {
		idp->idr_maxwant = max(idp->idr_maxwant, sid);
		lwkt_reltoken(&idp->idr_token);
		return -EAGAIN;
	}

	resid = idr_find_free(idp, sid, INT_MAX);
	if (resid == -1) {
		lwkt_reltoken(&idp->idr_token);
		return -EAGAIN;
	}

	if (resid >= idp->idr_count)
		panic("idr_get_new_above(): illegal resid %d", resid);
	if (resid > idp->idr_lastindex)
		idp->idr_lastindex = resid;
	if (sid <= idp->idr_freeindex)
		idp->idr_freeindex = resid;
	*id = resid;
	idr_reserve(idp, resid, 1);
	idp->idr_nodes[resid].data = ptr;

	lwkt_reltoken(&idp->idr_token);
	return (0);
}

/*
 * start: minimum id, inclusive
 * end:   maximum id, exclusive, or unbounded (INT_MAX) if end is
 *	  not positive
 */
int
idr_alloc(struct idr *idp, void *ptr, int start, int end, unsigned gfp_mask)
{
	int lim = end > 0 ? end - 1 : INT_MAX;
	int want = start;
	int result, id;

	if (start < 0)
		return -EINVAL;

	if (lim < start)
		return -ENOSPC;

	lwkt_gettoken(&idp->idr_token);

grow_again:
	if (want >= idp->idr_count)
		idr_grow(idp, want);

	/*
	 * Check if a spot is available.  It is possible for the found
	 * spot to land beyond our limit due to the way array growth
	 * works, which is handled as -ENOSPC below.
	 */
	id = idr_find_free(idp, start, lim);
	if (id == -1) {
		want = idp->idr_count;
		goto grow_again;
	}

	if (id >= lim) {
		result = -ENOSPC;
		goto done;
	}

	if (id >= idp->idr_count)
		panic("idr_alloc(): illegal resid %d", id);
	if (id > idp->idr_lastindex)
		idp->idr_lastindex = id;
	if (start <= idp->idr_freeindex)
		idp->idr_freeindex = id;
	result = id;
	idr_reserve(idp, id, 1);
	idp->idr_nodes[id].data = ptr;

done:
	lwkt_reltoken(&idp->idr_token);
	return result;
}
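
/*
 * Under the contract above, idr_alloc(&idr, ptr, 10, 20, 0) requests the
 * lowest free id in the window [10, 20) and returns -ENOSPC once that
 * window is exhausted; a non-positive 'end' leaves the window unbounded
 * above.  ('ptr' here stands for any caller-owned object.)
 */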

int
idr_get_new(struct idr *idp, void *ptr, int *id)
{
	return idr_get_new_above(idp, ptr, 0, id);
}

/*
 * Grow the node array so it can hold the requested id (want).
 *
 * Caller must hold a blockable lock.
 */
static void
idr_grow(struct idr *idp, int want)
{
	struct idr_node *oldnodes, *newnodes;
	int nf;

	/* We want 2^n - 1 entries */
	nf = idp->idr_count;
	do {
		nf = 2 * nf + 1;
	} while (nf <= want);
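
	/*
	 * e.g. growing from the initial count of 255 with want = 1000
	 * steps nf through 511 and settles on 1023, preserving the
	 * 2^n - 1 form.
	 */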

#ifdef USERLAND_TEST
	printf("idr_grow: %d -> %d\n", idp->idr_count, nf);
#endif

	/* Allocate a new zero'ed node array */
	newnodes = kmalloc(nf * sizeof(struct idr_node),
			   M_IDR, M_ZERO | M_WAITOK);

	/* We might race another grow */
	if (nf <= idp->idr_count) {
		kfree(newnodes, M_IDR);
		return;
	}

	/*
	 * Copy existing nodes to the beginning of the new array
	 */
	oldnodes = idp->idr_nodes;
	if (oldnodes) {
		bcopy(oldnodes, newnodes,
		      idp->idr_count * sizeof(struct idr_node));
	}
	idp->idr_nodes = newnodes;
	idp->idr_count = nf;

	if (oldnodes) {
		kfree(oldnodes, M_IDR);
	}
	idp->idr_nexpands++;
}

void
idr_remove(struct idr *idp, int id)
{
	void *ptr;

	lwkt_gettoken(&idp->idr_token);
	if (id < 0 || id >= idp->idr_count) {
		lwkt_reltoken(&idp->idr_token);
		return;
	}
	if ((ptr = idp->idr_nodes[id].data) == NULL) {
		lwkt_reltoken(&idp->idr_token);
		return;
	}
	idp->idr_nodes[id].data = NULL;
	idr_reserve(idp, id, -1);
	idrfixup(idp, id);
	lwkt_reltoken(&idp->idr_token);
}

/*
 * Remove all id allocations, leaving the array intact.
 *
 * Caller must hold a blockable lock (or be in a context where holding
 * the spinlock is not relevant).
 */
void
idr_remove_all(struct idr *idp)
{
	lwkt_gettoken(&idp->idr_token);
	bzero(idp->idr_nodes, idp->idr_count * sizeof(struct idr_node));
	idp->idr_lastindex = -1;
	idp->idr_freeindex = 0;
	idp->idr_nexpands = 0;
	idp->idr_maxwant = 0;
	lwkt_reltoken(&idp->idr_token);
}

void
idr_destroy(struct idr *idp)
{
	lwkt_token_uninit(&idp->idr_token);
	if (idp->idr_nodes) {
		kfree(idp->idr_nodes, M_IDR);
		idp->idr_nodes = NULL;
	}
	bzero(idp, sizeof(*idp));
}

void *
idr_find(struct idr *idp, int id)
{
	void *ret;

	if (id < 0 || id >= idp->idr_count) {
		ret = NULL;
	} else if (idp->idr_nodes[id].allocated == 0) {
		ret = NULL;
	} else {
		ret = idp->idr_nodes[id].data;
	}
	return ret;
}

int
idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data),
	     void *data)
{
	int i, error = 0;
	struct idr_node *nodes;

	nodes = idp->idr_nodes;
	for (i = 0; i < idp->idr_count; i++) {
		if (nodes[i].data != NULL && nodes[i].allocated > 0) {
			error = fn(i, nodes[i].data, data);
			if (error != 0)
				break;
		}
	}
	return error;
}
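
/*
 * Example iteration callback, as a sketch ('count_cb' is illustrative
 * and not part of this file): count the allocated ids.
 *
 *	static int
 *	count_cb(int id, void *p, void *data)
 *	{
 *		++*(int *)data;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&idr, count_cb, &count);
 */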

void *
idr_replace(struct idr *idp, void *ptr, int id)
{
	struct idr_node *idrnp;
	void *ret;

	lwkt_gettoken(&idp->idr_token);
	idrnp = idr_get_node(idp, id);
	if (idrnp == NULL || ptr == NULL) {
		ret = NULL;
	} else {
		ret = idrnp->data;
		idrnp->data = ptr;
	}
	lwkt_reltoken(&idp->idr_token);
	return (ret);
}

void
idr_init(struct idr *idp)
{
	bzero(idp, sizeof(struct idr));
	idp->idr_nodes = kmalloc(IDR_DEFAULT_SIZE * sizeof(struct idr_node),
				 M_IDR, M_WAITOK | M_ZERO);
	idp->idr_count = IDR_DEFAULT_SIZE;
	idp->idr_lastindex = -1;
	idp->idr_maxwant = 0;
	lwkt_token_init(&idp->idr_token, "idr token");
}
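
/*
 * Typical lifecycle, as a sketch ('ptr' stands for any caller-owned
 * object; error checking elided):
 *
 *	struct idr idr;
 *	int id;
 *
 *	idr_init(&idr);
 *	id = idr_alloc(&idr, ptr, 0, -1, 0);
 *	KKASSERT(idr_find(&idr, id) == ptr);
 *	idr_remove(&idr, id);
 *	idr_destroy(&idr);
 */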