/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a general bitmap allocator/deallocator.  The
 * allocator eats around 2 bits per 'block'.  The module does not
 * try to interpret the meaning of a 'block' other than to return
 * SWAPBLK_NONE on an allocation failure.
 *
 * A radix tree is used to maintain the bitmap.  Two radix constants are
 * involved: one for the bitmaps contained in the leaf nodes (typically
 * 32), and one for the meta nodes (typically 16).  Both meta and leaf
 * nodes have a hint field.  This field gives us a hint as to the largest
 * free contiguous range of blocks under the node.  It may contain a
 * value that is too high, but will never contain a value that is too
 * low.  When the radix tree is searched, allocation failures in subtrees
 * update the hint.
 *
 * The radix tree also implements two collapsed states for meta nodes:
 * the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 * in either of these two states, all information contained underneath
 * the node is considered stale.  These states are used to optimize
 * allocation and freeing operations.
 *
 * The hinting greatly increases code efficiency for allocations while
 * the general radix structure optimizes both allocations and frees.  The
 * radix tree should be able to operate well no matter how much
 * fragmentation there is and no matter how large a bitmap is used.
 *
 * Unlike the rlist code, the blist code wires all necessary memory at
 * creation time.  Neither allocations nor frees require interaction with
 * the memory subsystem.  In contrast, the rlist code may allocate memory
 * on an rlist_free() call.  The non-blocking features of the blist code
 * are used to great advantage in the swap code (vm/nswap_pager.c).  The
 * rlist code uses a little less overall memory than the blist code (but
 * due to swap interleaving not all that much less), but the blist code
 * scales much, much better.
 *
 * LAYOUT: The radix tree is laid out recursively using a
 * linear array.  Each meta node is immediately followed (laid out
 * sequentially in memory) by BLIST_META_RADIX lower-level nodes.  This
 * is a recursive structure but one that can be easily scanned through
 * a very simple 'skip' calculation.  In order to support large radixes,
 * portions of the tree may reside outside our memory allocation.  We
 * handle this with an early-termination optimization (when bighint is
 * set to -1) on the scan.  The memory allocation is only large enough
 * to cover the number of blocks requested at creation time even if it
 * must be encompassed by a larger root-node radix.
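 *
 *	For example, with the typical radix constants (BLIST_BMAP_RADIX
 *	32, BLIST_META_RADIX 16), a three-level tree has radix 8192 and
 *	skip 272:  the root meta node is followed by 16 subtrees of 17
 *	nodes each (one meta node plus its 16 leaves), so the scan steps
 *	through the child meta nodes at array offsets 1, 18, 35, ...
 *	using next_skip = skip / BLIST_META_RADIX = 17.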
 *
 * NOTE: The allocator cannot currently allocate more than
 * BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 * large' if you try.  This is an area that could use improvement.  The
 * radix is large enough that this restriction does not affect the swap
 * system, though.  Currently only the allocation code is affected by
 * this algorithmic unfeature.  The freeing code can handle arbitrary
 * ranges.
 *
 * NOTE: The radix may exceed BLIST_BMAP_RADIX bits in order to support
 * up to 2^(BLIST_BMAP_RADIX-1) blocks.  The first division will
 * drop the radix down and fit it within a signed BLIST_BMAP_RADIX
 * bit integer.
 *
 * This code can be compiled stand-alone for debugging.
 */
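
/*
 * For example, a stand-alone debug build can look something like the
 * following (a sketch; it assumes the host's <sys/blist.h> is current
 * enough to provide swblk_t, u_swblk_t, blmeta_t and blist_t):
 *
 *	cc subr_blist.c -o blisttest
 */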

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#define SWAPBLK_NONE	((swblk_t)-1)

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <limits.h>

#define kmalloc(a,b,c)	malloc(a)
#define kfree(a,b)	free(a)
#define kprintf		printf
#define KKASSERT(exp)

#include <sys/blist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */
static swblk_t blst_leaf_alloc(blmeta_t *scan, swblk_t blkat,
				swblk_t blk, swblk_t count);
static swblk_t blst_meta_alloc(blmeta_t *scan, swblk_t blkat,
				swblk_t blk, swblk_t count,
				int64_t radix, swblk_t skip);
static void blst_leaf_free(blmeta_t *scan, swblk_t relblk, swblk_t count);
static void blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count,
			   int64_t radix, swblk_t skip, swblk_t blk);
static swblk_t blst_leaf_fill(blmeta_t *scan, swblk_t blk, swblk_t count);
static swblk_t blst_meta_fill(blmeta_t *scan, swblk_t fillBlk, swblk_t count,
			      int64_t radix, swblk_t skip, swblk_t blk);
static void blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix,
		      swblk_t skip, blist_t dest, swblk_t count);
static swblk_t blst_radix_init(blmeta_t *scan, int64_t radix,
			       swblk_t skip, swblk_t count);
#ifndef _KERNEL
static void blst_radix_print(blmeta_t *scan, swblk_t blk,
			     int64_t radix, swblk_t skip, int tab);
#endif

#ifdef _KERNEL
static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */
blist_t
blist_create(swblk_t blocks)
{
	blist_t bl;
	int64_t radix;
	swblk_t skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 *
	 * Radix can exceed BLIST_BMAP_RADIX bits even if swblk_t is limited
	 * to BLIST_BMAP_RADIX bits.
	 */
	radix = BLIST_BMAP_RADIX;
	while (radix < blocks) {
		radix *= BLIST_META_RADIX;
		skip = (skip + 1) * BLIST_META_RADIX;
		KKASSERT(skip > 0);
	}

	bl = kmalloc(sizeof(struct blist), M_SWAP, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = kmalloc(sizeof(blmeta_t) * bl->bl_rootblks,
			      M_SWAP, M_WAITOK);

#if defined(BLIST_DEBUG)
	kprintf(
		"BLIST representing %lu blocks (%lu MB of swap)"
		", requiring %6.2fM of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(blmeta_t) + 1023) / (1024.0 * 1024.0)
	);
	kprintf("BLIST raw radix tree: %lu records, top-radix %lu\n",
		bl->bl_rootblks, bl->bl_radix);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}
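
/*
 * For example, with the typical BLIST_BMAP_RADIX of 32 and
 * BLIST_META_RADIX of 16, blocks = 1048576 (2^20) runs the sizing
 * loop above four times:
 *
 *	radix:	32 -> 512 -> 8192 -> 131072 -> 2097152
 *	skip:	 0 ->  16 ->  272 ->   4368 ->   69904
 *
 * i.e. four meta levels above the leaf bitmaps.
 */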

void
blist_destroy(blist_t bl)
{
	kfree(bl->bl_root, M_SWAP);
	kfree(bl, M_SWAP);
}

/*
 * blist_alloc() -	reserve space in the block bitmap.  Return the base
 *			of a contiguous region or SWAPBLK_NONE if space could
 *			not be allocated.
 */
swblk_t
blist_alloc(blist_t bl, swblk_t count)
{
	swblk_t blk = SWAPBLK_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, 0, count,
					      bl->bl_radix, bl->bl_skip);
		if (blk != SWAPBLK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_allocat() -	like blist_alloc(), but the search is constrained
 *			to blocks at or after 'blkat' (at meta-node
 *			granularity).
 */
swblk_t
blist_allocat(blist_t bl, swblk_t count, swblk_t blkat)
{
	swblk_t blk = SWAPBLK_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, blkat, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, blkat, 0, count,
					      bl->bl_radix, bl->bl_skip);
		if (blk != SWAPBLK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  Panic if an
 *			inconsistency is found.
 */
void
blist_free(blist_t bl, swblk_t blkno, swblk_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count,
				       bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */
swblk_t
blist_fill(blist_t bl, swblk_t blkno, swblk_t count)
{
	swblk_t filled;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX) {
			filled = blst_leaf_fill(bl->bl_root, blkno, count);
		} else {
			filled = blst_meta_fill(bl->bl_root, blkno, count,
						bl->bl_radix, bl->bl_skip, 0);
		}
		bl->bl_free -= filled;
		return (filled);
	} else {
		return 0;
	}
}
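
/*
 * A minimal usage sketch of the public API (illustrative only, not
 * compiled; the sizes and the blist_example name are hypothetical).
 * Note that a freshly created blist starts fully allocated and must be
 * freed before use, just as main() does in the debug build below.
 */
#if 0
static void
blist_example(void)
{
	blist_t bl;
	swblk_t blk;

	bl = blist_create(1024);	/* manage blocks 0..1023 */
	blist_free(bl, 0, 1024);	/* new blists start all-allocated */

	blk = blist_alloc(bl, 16);	/* contiguous run of 16 blocks */
	if (blk != SWAPBLK_NONE)
		blist_free(bl, blk, 16);

	blist_fill(bl, 0, 8);		/* force blocks 0..7 allocated */
	blist_destroy(bl);
}
#endif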

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */
void
blist_resize(blist_t *pbl, swblk_t count, int freenew)
{
	blist_t newbl = blist_create(count);
	blist_t save = *pbl;

	*pbl = newbl;
	if (count > save->bl_blocks)
		count = save->bl_blocks;
	blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

	/*
	 * If resizing upwards, should we free the new space or not?
	 */
	if (freenew && count < newbl->bl_blocks) {
		blist_free(newbl, count, newbl->bl_blocks - count);
	}
	blist_destroy(save);
}

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */
void
blist_print(blist_t bl)
{
	kprintf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	kprintf("}\n");
}

#endif

/************************************************************************
 *			  ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 */

/*
 * blst_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */
static swblk_t
blst_leaf_alloc(blmeta_t *scan, swblk_t blkat __unused, swblk_t blk,
		swblk_t count)
{
	u_swblk_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		u_swblk_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (u_swblk_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~((swblk_t)1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = (int)(BLIST_BMAP_RADIX - count);
		u_swblk_t mask;

		mask = (u_swblk_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;

	return(SWAPBLK_NONE);
}
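
/*
 * Worked example of the count == 1 binary search above, assuming
 * BLIST_BMAP_RADIX = 32:  for orig = 0x00010000 (only bit 16 free) the
 * first probe sees the low 16 bits all-allocated, so r += 16 and orig
 * is shifted right; the remaining probes all find a free bit in their
 * low half and leave r unchanged, converging on r = 16 after
 * log2(32) = 5 iterations.
 */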

/*
 * blist_meta_alloc() -	allocate at a meta node in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */
static swblk_t
blst_meta_alloc(blmeta_t *scan, swblk_t blkat,
		swblk_t blk, swblk_t count,
		int64_t radix, swblk_t skip)
{
	int hintok = (blk >= blkat);
	swblk_t next_skip = ((swblk_t)skip / BLIST_META_RADIX);
	swblk_t i;

#ifndef _KERNEL
	kprintf("blist_meta_alloc blkat %ld blk %ld count %ld radix %ld\n",
		blkat, blk, count, radix);
#endif

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->u.bmu_avail == 0) {
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}

	/*
	 * ALL-FREE special case, initialize uninitialized
	 * sublevel.
	 *
	 * NOTE: radix may exceed 32 bits until first division.
	 */
	if (scan->u.bmu_avail == radix) {
		scan->bm_bighint = radix;

		radix /= BLIST_META_RADIX;
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (swblk_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_swblk_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = (swblk_t)radix;
				scan[i].u.bmu_avail = (swblk_t)radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (count <= scan[i].bm_bighint &&
		    blk + (swblk_t)radix > blkat) {
			/*
			 * count fits in object
			 */
			swblk_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blkat,
						    blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blkat,
						    blk, count,
						    radix, next_skip - 1);
			}
			if (r != SWAPBLK_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
			/* bighint was updated by recursion */
		} else if (scan[i].bm_bighint == (swblk_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count > (swblk_t)radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large %lu/%lu",
			      count, radix);
		}
		blk += (swblk_t)radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (hintok && scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}

/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 */
static void
blst_leaf_free(blmeta_t *scan, swblk_t blk, swblk_t count)
{
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *	    \_________/\__/
	 *	         v      n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	u_swblk_t mask;

	mask = ((u_swblk_t)-1 << n) &
	       ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}
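
/*
 * Worked example of the mask computation above, assuming
 * BLIST_BMAP_RADIX = 32:  freeing count = 3 blocks at bit offset
 * n = 4 yields ((u_swblk_t)-1 << 4) & ((u_swblk_t)-1 >> 25) == 0x70,
 * i.e. exactly bits 4..6, which are verified clear (allocated) and
 * then set free.
 */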

/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */
static void
blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count,
	       int64_t radix, swblk_t skip, swblk_t blk)
{
	swblk_t i;
	swblk_t next_skip = ((swblk_t)skip / BLIST_META_RADIX);

#if 0
	kprintf("FREE (%lx,%lu) FROM (%lx,%lu)\n",
		freeBlk, count,
		blk, radix
	);
#endif

	/*
	 * ALL-ALLOCATED special case, initialize for recursion.
	 *
	 * We will short-cut the ALL-ALLOCATED -> ALL-FREE case.
	 */
	if (scan->u.bmu_avail == 0) {
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix) {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (swblk_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 *
	 * Set bighint for higher levels to snoop.
	 */
	if (scan->u.bmu_avail == radix) {
		scan->bm_bighint = radix;
		return;
	}

	/*
	 * Break the free down into its components
	 */
	if (scan->u.bmu_avail > radix) {
		panic("blst_meta_free: freeing already "
		      "free blocks (%lu) %lu/%lu",
		      count, (long)scan->u.bmu_avail, radix);
	}

	radix /= BLIST_META_RADIX;

	i = (freeBlk - blk) / (swblk_t)radix;
	blk += i * (swblk_t)radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		swblk_t v;

		v = blk + (swblk_t)radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (swblk_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v,
				       radix, next_skip - 1, blk);
		}

		/*
		 * After having dealt with the becomes-all-free case any
		 * partial free will not be able to bring us to the
		 * becomes-all-free state.
		 *
		 * We can raise bighint to at least the sub-segment's
		 * bighint.
		 */
		if (scan->bm_bighint < scan[i].bm_bighint) {
			scan->bm_bighint = scan[i].bm_bighint;
		}
		count -= v;
		freeBlk += v;
		blk += (swblk_t)radix;
		i += next_skip;
	}
}

/*
 * BLST_LEAF_FILL() -	allocate specific blocks in leaf bitmap
 *
 *	Allocates all blocks in the specified range regardless of
 *	any existing allocations in that range.  Returns the number
 *	of blocks allocated by the call.
 */
static swblk_t
blst_leaf_fill(blmeta_t *scan, swblk_t blk, swblk_t count)
{
	int n = blk & (BLIST_BMAP_RADIX - 1);
	swblk_t nblks;
	u_swblk_t mask, bitmap;

	mask = ((u_swblk_t)-1 << n) &
	       ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	/* Count the number of blocks we're about to allocate */
	bitmap = scan->u.bmu_bitmap & mask;
	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;

	scan->u.bmu_bitmap &= ~mask;
	return (nblks);
}
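
/*
 * Worked example of the fill above, assuming BLIST_BMAP_RADIX = 32:
 * filling blk offset 4, count = 3 in a leaf where only bit 5 is
 * already allocated gives mask = 0x70; the bitmap &= (bitmap - 1)
 * loop (which clears one set bit per pass) counts nblks = 2 for the
 * still-free bits 4 and 6 before all three are marked allocated.
 */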

/*
 * BLST_META_FILL() -	allocate specific blocks at a meta node
 *
 *	Allocates the specified range of blocks, regardless of
 *	any existing allocations in the range.  The range must
 *	be within the extent of this node.  Returns the number
 *	of blocks allocated by the call.
 */
static swblk_t
blst_meta_fill(blmeta_t *scan, swblk_t fillBlk, swblk_t count,
	       int64_t radix, swblk_t skip, swblk_t blk)
{
	swblk_t i;
	swblk_t next_skip = ((swblk_t)skip / BLIST_META_RADIX);
	swblk_t nblks = 0;

	if (count == radix || scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case
		 */
		nblks = scan->u.bmu_avail;
		scan->u.bmu_avail = 0;
		scan->bm_bighint = count;
		return (nblks);
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize sublevel
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (swblk_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_swblk_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = (swblk_t)radix;
				scan[i].u.bmu_avail = (swblk_t)radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	if (count > (swblk_t)radix)
		panic("blst_meta_fill: allocation too large");

	i = (fillBlk - blk) / (swblk_t)radix;
	blk += i * (swblk_t)radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < fillBlk + count) {
		swblk_t v;

		v = blk + (swblk_t)radix - fillBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (swblk_t)-1)
			panic("blst_meta_fill: filling unexpected range");

		if (next_skip == 1) {
			nblks += blst_leaf_fill(&scan[i], fillBlk, v);
		} else {
			nblks += blst_meta_fill(&scan[i], fillBlk, v,
						radix, next_skip - 1, blk);
		}
		count -= v;
		fillBlk += v;
		blk += (swblk_t)radix;
		i += next_skip;
	}
	scan->u.bmu_avail -= nblks;
	return (nblks);
}

/*
 * BLIST_RADIX_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space may not already be free in the destination.
 */
static void
blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix,
	  swblk_t skip, blist_t dest, swblk_t count)
{
	swblk_t next_skip;
	swblk_t i;

	/*
	 * Leaf node
	 */
	if (radix == BLIST_BMAP_RADIX) {
		u_swblk_t v = scan->u.bmu_bitmap;

		if (v == (u_swblk_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			swblk_t i;

			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				if (v & ((swblk_t)1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */
	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, (swblk_t)radix);
		return;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_swblk_t)skip / BLIST_META_RADIX);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (swblk_t)-1)
			break;

		if (count >= (swblk_t)radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    (swblk_t)radix
			);
			count -= (swblk_t)radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += (swblk_t)radix;
	}
}

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */
static swblk_t
blst_radix_init(blmeta_t *scan, int64_t radix, swblk_t skip, swblk_t count)
{
	swblk_t i;
	swblk_t next_skip;
	swblk_t memindex = 0;

	/*
	 * Leaf node
	 */
	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */
	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_swblk_t)skip / BLIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= (swblk_t)radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    (swblk_t)radix
			);
			count -= (swblk_t)radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (swblk_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}
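
/*
 * Worked example, assuming BLIST_BMAP_RADIX = 32 and BLIST_META_RADIX
 * = 16:  for count = 1024 the tree radix is 8192 with skip 272, but
 * only the root, two 17-node subtrees (one meta node plus 16 leaves
 * each) and a terminator slot are required, so this returns 35 and
 * blist_create() allocates 36 nodes rather than the 273 a fully
 * populated tree would need.
 */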

#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, swblk_t blk, int64_t radix, swblk_t skip, int tab)
{
	swblk_t i;
	swblk_t next_skip;

	if (radix == BLIST_BMAP_RADIX) {
		kprintf(
		    "%*.*s(%04lx,%lu): bitmap %016lx big=%lu\n",
		    tab, tab, "",
		    blk, radix,
		    scan->u.bmu_bitmap,
		    scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		kprintf(
		    "%*.*s(%04lx,%ld) ALL ALLOCATED\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		kprintf(
		    "%*.*s(%04lx,%ld) ALL FREE\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}

	kprintf(
	    "%*.*s(%04lx,%lu): subtree (%lu/%lu) big=%lu {\n",
	    tab, tab, "",
	    blk, (long long)radix,
	    scan->u.bmu_avail,
	    (long long)radix,
	    scan->bm_bighint
	);

	radix /= BLIST_META_RADIX;
	next_skip = ((u_swblk_t)skip / BLIST_META_RADIX);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (swblk_t)-1) {
			kprintf(
			    "%*.*s(%04lx,%ld): Terminator\n",
			    tab, tab, "",
			    blk, radix
			);
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += (swblk_t)radix;
	}
	tab -= 4;

	kprintf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	swblk_t size = 1024;
	swblk_t i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		swblk_t da = 0;
		swblk_t count = 0;
		swblk_t blkat;

		kprintf("%lu/%lu/%llu> ",
			bl->bl_free, size, (long long)bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%li", &count) == 1) {
				blist_resize(&bl, count, 1);
				size = count;
			} else {
				kprintf("?\n");
			}
			/* fall through to print the resized tree */
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%li %li", &count, &blkat) == 1) {
				kprintf("count %ld\n", count);
				swblk_t blk = blist_alloc(bl, count);
				kprintf("    R=%04lx\n", blk);
			} else if (sscanf(buf + 1, "%li %li", &count, &blkat) == 2) {
				swblk_t blk = blist_allocat(bl, count, blkat);
				kprintf("    R=%04lx\n", blk);
			} else {
				kprintf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%li %li", &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				kprintf("?\n");
			}
			break;
		case 'l':
			if (sscanf(buf + 1, "%li %li", &da, &count) == 2) {
				printf("    n=%lu\n",
				       blist_fill(bl, da, count));
			} else {
				kprintf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %li      -allocate\n"
			    "f %li %li  -free\n"
			    "l %li %li  -fill\n"
			    "r %li      -resize\n"
			    "h/?        -help\n"
			    "    hex may be specified with 0x prefix\n"
			);
			break;
		default:
			kprintf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	__va_list va;

	__va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	__va_end(va);
	exit(1);
}

#endif