/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a general bitmap allocator/deallocator.  The
 * allocator eats around 2 bits per 'block'.  The module does not
 * try to interpret the meaning of a 'block' other than to return
 * SWAPBLK_NONE on an allocation failure.
 *
 * A radix tree is used to maintain the bitmap.  Two radix constants are
 * involved: one for the bitmaps contained in the leaf nodes (typically
 * 32), and one for the meta nodes (typically 16).  Both meta and leaf
 * nodes have a hint field.  This field gives us a hint as to the largest
 * free contiguous range of blocks under the node.  It may contain a
 * value that is too high, but will never contain a value that is too
 * low.  When the radix tree is searched, allocation failures in subtrees
 * update the hint.
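 *
 * Worked example of the hint (illustrative, not from the original
 * comments): if a meta node's hint is 4, a request for 8 contiguous
 * blocks can skip the entire subtree without descending into it.  A
 * request for 3 blocks must still descend and may fail anyway, because
 * the hint is only an upper bound; that failure then lowers the hint so
 * later searches prune the subtree earlier.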
 *
 * The radix tree also implements two collapsed states for meta nodes:
 * the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 * in either of these two states, all information contained underneath
 * the node is considered stale.  These states are used to optimize
 * allocation and freeing operations.
 *
 * The hinting greatly increases code efficiency for allocations while
 * the general radix structure optimizes both allocations and frees.  The
 * radix tree should be able to operate well no matter how much
 * fragmentation there is and no matter how large a bitmap is used.
 *
 * Unlike the rlist code, the blist code wires all necessary memory at
 * creation time.  Neither allocations nor frees require interaction with
 * the memory subsystem.  In contrast, the rlist code may allocate memory
 * on an rlist_free() call.  The non-blocking features of the blist code
 * are used to great advantage in the swap code (vm/nswap_pager.c).  The
 * rlist code uses a little less overall memory than the blist code (but
 * due to swap interleaving not all that much less), but the blist code
 * scales much, much better.
 *
 * LAYOUT: The radix tree is laid out recursively using a
 * linear array.  Each meta node is immediately followed (laid out
 * sequentially in memory) by BLIST_META_RADIX lower-level nodes.  This
 * is a recursive structure but one that can be easily scanned through
 * a very simple 'skip' calculation.  In order to support large radixes,
 * portions of the tree may reside outside our memory allocation.  We
 * handle this with an early-termination optimization (when bighint is
 * set to -1) on the scan.  The memory allocation is only large enough
 * to cover the number of blocks requested at creation time even if it
 * must be encompassed in a larger root-node radix.
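 *
 * Worked layout example (illustrative, assuming the typical
 * BLIST_BMAP_RADIX of 32 and BLIST_META_RADIX of 16): a two-level tree
 * is created with skip = (0 + 1) * 16 = 16, so the root at index 0 is
 * followed by 16 leaf slots.  A three-level tree has skip =
 * (16 + 1) * 16 = 272 and next_skip = 272 / 16 = 17, so the i'th child
 * of the root starts at index 1 + i * 17 and owns 17 contiguous slots.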
 *
 * NOTE: The allocator cannot currently allocate more than
 * BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 * large' if you try.  This is an area that could use improvement.  The
 * radix is large enough that this restriction does not affect the swap
 * system, though.  Currently only the allocation code is affected by
 * this algorithmic unfeature.  The freeing code can handle arbitrary
 * ranges.
 *
 * NOTE: The radix may exceed 32 bits in order to support up to 2^31
 *	 blocks.  The first division will drop the radix down and fit
 *	 it within a signed 32-bit integer.
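 *
 * Worked example (illustrative, assuming BLIST_BMAP_RADIX = 32 and
 * BLIST_META_RADIX = 16): covering 2^31 blocks requires a root radix of
 * 32 * 16^7 = 2^33, which does not fit in a signed 32-bit swblk_t.
 * After the first division by BLIST_META_RADIX the per-child radix is
 * 2^29, which does fit; this is why the radix is carried as an int64_t
 * and only cast to swblk_t below the root.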
 *
 * This code can be compiled stand-alone for debugging.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#define SWAPBLK_NONE	((swblk_t)-1)

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#define kmalloc(a,b,c)	malloc(a)
#define kfree(a,b)	free(a)
#define kprintf		printf
#define KKASSERT(exp)

#include <sys/blist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */
static swblk_t blst_leaf_alloc(blmeta_t *scan, swblk_t blkat,
			       swblk_t blk, int count);
static swblk_t blst_meta_alloc(blmeta_t *scan, swblk_t blkat,
			       swblk_t blk, swblk_t count,
			       int64_t radix, int skip);
static void blst_leaf_free(blmeta_t *scan, swblk_t relblk, int count);
static void blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count,
			   int64_t radix, int skip, swblk_t blk);
static swblk_t blst_leaf_fill(blmeta_t *scan, swblk_t blk, int count);
static swblk_t blst_meta_fill(blmeta_t *scan, swblk_t fillBlk, swblk_t count,
			      int64_t radix, int skip, swblk_t blk);
static void blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix,
		      swblk_t skip, blist_t dest, swblk_t count);
static swblk_t blst_radix_init(blmeta_t *scan, int64_t radix,
			       int skip, swblk_t count);
static void blst_radix_print(blmeta_t *scan, swblk_t blk,
			     int64_t radix, int skip, int tab);

#ifdef _KERNEL
static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space");
#endif

/*
 * blist_create() -	create a blist capable of handling up to the specified
 *			number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */
blist_t
blist_create(swblk_t blocks)
{
	blist_t bl;
	int64_t radix;
	int skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 *
	 * Radix can exceed 32 bits even if swblk_t is limited to 32 bits.
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= BLIST_META_RADIX;
		skip = (skip + 1) * BLIST_META_RADIX;
	}

	bl = kmalloc(sizeof(struct blist), M_SWAP, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = kmalloc(sizeof(blmeta_t) * bl->bl_rootblks,
			      M_SWAP, M_WAITOK);

#if defined(BLIST_DEBUG)
	kprintf(
		"BLIST representing %d blocks (%d MB of swap)"
		", requiring %dK of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
	);
	kprintf("BLIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}

/*
 * blist_destroy() -	destroy a blist, releasing all radix tree memory
 */
void
blist_destroy(blist_t bl)
{
	kfree(bl->bl_root, M_SWAP);
	kfree(bl, M_SWAP);
}
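
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * blist is created with every block marked allocated, so callers free
 * the managed range before handing out blocks.
 *
 *	blist_t bl = blist_create(2048);	manage blocks 0..2047
 *	blist_free(bl, 0, 2048);		now everything is free
 *	swblk_t base = blist_alloc(bl, 16);	16 contiguous blocks
 *	if (base != SWAPBLK_NONE)
 *		blist_free(bl, base, 16);	give them back
 *	blist_destroy(bl);
 */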

/*
 * blist_alloc() -	reserve space in the block bitmap.  Return the base
 *			of a contiguous region or SWAPBLK_NONE if space could
 *			not be allocated.
 */
swblk_t
blist_alloc(blist_t bl, swblk_t count)
{
	swblk_t blk = SWAPBLK_NONE;

	if (bl->bl_radix == BLIST_BMAP_RADIX)
		blk = blst_leaf_alloc(bl->bl_root, 0, 0, count);
	else
		blk = blst_meta_alloc(bl->bl_root, 0, 0, count,
				      bl->bl_radix, bl->bl_skip);
	if (blk != SWAPBLK_NONE)
		bl->bl_free -= count;
	return(blk);
}

/*
 * blist_allocat() -	reserve space in the block bitmap, restricting the
 *			search to subtrees that can contain blocks at or
 *			after blkat.  Return the base of a contiguous region
 *			or SWAPBLK_NONE if space could not be allocated.
 */
swblk_t
blist_allocat(blist_t bl, swblk_t count, swblk_t blkat)
{
	swblk_t blk = SWAPBLK_NONE;

	if (bl->bl_radix == BLIST_BMAP_RADIX)
		blk = blst_leaf_alloc(bl->bl_root, blkat, 0, count);
	else
		blk = blst_meta_alloc(bl->bl_root, blkat, 0, count,
				      bl->bl_radix, bl->bl_skip);
	if (blk != SWAPBLK_NONE)
		bl->bl_free -= count;
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  Panic if an
 *			inconsistency is found.
 */
void
blist_free(blist_t bl, swblk_t blkno, swblk_t count)
{
	if (bl->bl_radix == BLIST_BMAP_RADIX)
		blst_leaf_free(bl->bl_root, blkno, count);
	else
		blst_meta_free(bl->bl_root, blkno, count,
			       bl->bl_radix, bl->bl_skip, 0);
	bl->bl_free += count;
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */
swblk_t
blist_fill(blist_t bl, swblk_t blkno, swblk_t count)
{
	swblk_t filled;

	if (bl->bl_radix == BLIST_BMAP_RADIX) {
		filled = blst_leaf_fill(bl->bl_root, blkno, count);
	} else {
		filled = blst_meta_fill(bl->bl_root, blkno, count,
					bl->bl_radix, bl->bl_skip, 0);
	}
	bl->bl_free -= filled;
	return(filled);
}
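
/*
 * Example of the return value (illustrative, not from the original
 * source): if blocks 10-13 are already allocated and 14-19 are free,
 * blist_fill(bl, 10, 10) marks blocks 10-19 allocated and returns 6,
 * the number of blocks that were free beforehand, so the caller can
 * adjust its own accounting by exactly that amount.
 */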

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */
void
blist_resize(blist_t *pbl, swblk_t count, int freenew)
{
	blist_t newbl = blist_create(count);
	blist_t save = *pbl;

	*pbl = newbl;
	if (count > save->bl_blocks)
		count = save->bl_blocks;
	blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip,
		  newbl, count);

	/*
	 * If resizing upwards, should we free the new space or not?
	 */
	if (freenew && count < newbl->bl_blocks) {
		blist_free(newbl, count, newbl->bl_blocks - count);
	}
	blist_destroy(save);
}

/*
 * blist_print()    -	dump radix tree
 */
void
blist_print(blist_t bl)
{
	kprintf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	kprintf("}\n");
}

/************************************************************************
 *			  ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 */

/*
 * blst_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */
static swblk_t
blst_leaf_alloc(blmeta_t *scan, swblk_t blkat __unused, swblk_t blk,
		int count)
{
	u_swblk_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		u_swblk_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (u_swblk_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~(1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = BLIST_BMAP_RADIX - count;
		u_swblk_t mask;

		mask = (u_swblk_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}
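
/*
 * Worked example of the count == 1 binary search above (illustrative,
 * assuming BLIST_BMAP_RADIX = 32): with orig = 0x00030000 only bits 16
 * and 17 are free.  The first probe finds no free bit in the low 16
 * bits, so r += 16 and orig >>= 16; every later probe does find a free
 * bit in its low half, so r stays 16.  Bit 16 is then cleared and
 * blk + 16 is returned.
 */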

/*
 * blst_meta_alloc() -	allocate at a meta node in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */
static swblk_t
blst_meta_alloc(blmeta_t *scan, swblk_t blkat,
		swblk_t blk, swblk_t count,
		int64_t radix, int skip)
{
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);
	int hintok = (blk >= blkat);

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->u.bmu_avail == 0) {
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}

	/*
	 * ALL-FREE special case, initialize uninitialized
	 * sublevel.
	 *
	 * NOTE: radix may exceed 32 bits until first division.
	 */
	if (scan->u.bmu_avail == radix) {
		scan->bm_bighint = radix;

		radix /= BLIST_META_RADIX;
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (swblk_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_swblk_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = (swblk_t)radix;
				scan[i].u.bmu_avail = (swblk_t)radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (count <= scan[i].bm_bighint &&
		    blk + (swblk_t)radix > blkat) {
			/*
			 * count fits in object
			 */
			swblk_t r;

			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blkat,
						    blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blkat,
						    blk, count,
						    radix, next_skip - 1);
			}
			if (r != SWAPBLK_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
			/* bighint was updated by recursion */
		} else if (scan[i].bm_bighint == (swblk_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count > (swblk_t)radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large");
		}
		blk += (swblk_t)radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (hintok && scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}

/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 */
static void
blst_leaf_free(blmeta_t *scan, swblk_t blk, int count)
{
	/*
	 * free some data in this bitmap
	 * e.g. 0000111111111110000
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	u_swblk_t mask;

	mask = ((u_swblk_t)-1 << n) &
	       ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}
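
/*
 * Worked mask example (illustrative, assuming BLIST_BMAP_RADIX = 32):
 * freeing count = 4 blocks at blk = 10 gives n = 10, so
 * mask = (~0 << 10) & (~0 >> (32 - 4 - 10)) = 0x00003c00, i.e. bits
 * 10-13.  Any of those bits already being set would mean a double free,
 * hence the panic check before the mask is OR'd into the bitmap.
 */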

/*
 * BLST_META_FREE() -	free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */
static void
blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count,
	       int64_t radix, int skip, swblk_t blk)
{
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);

#if 0
	kprintf("FREE (%x,%d) FROM (%x,%lld)\n",
		freeBlk, count,
		blk, (long long)radix
	);
#endif

	/*
	 * ALL-ALLOCATED special case, initialize for recursion.
	 *
	 * We will short-cut the ALL-ALLOCATED -> ALL-FREE case.
	 */
	if (scan->u.bmu_avail == 0) {
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix) {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (swblk_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 *
	 * Set bighint for higher levels to snoop.
	 */
	if (scan->u.bmu_avail == radix) {
		scan->bm_bighint = radix;
		return;
	}

	/*
	 * Break the free down into its components
	 */
	if (scan->u.bmu_avail > radix) {
		panic("blst_meta_free: freeing already "
		      "free blocks (%d) %d/%lld",
		      count, scan->u.bmu_avail, (long long)radix);
	}

	radix /= BLIST_META_RADIX;

	i = (freeBlk - blk) / (swblk_t)radix;
	blk += i * (swblk_t)radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		swblk_t v;

		v = blk + (swblk_t)radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (swblk_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v,
				       radix, next_skip - 1, blk);
		}

		/*
		 * After having dealt with the becomes-all-free case any
		 * partial free will not be able to bring us to the
		 * becomes-all-free state.
		 *
		 * We can raise bighint to at least the sub-segment's
		 * bighint.
		 */
		if (scan->bm_bighint < scan[i].bm_bighint) {
			scan->bm_bighint = scan[i].bm_bighint;
		}
		count -= v;
		freeBlk += v;
		blk += (swblk_t)radix;
		i += next_skip;
	}
}

/*
 * BLST_LEAF_FILL() -	allocate specific blocks in leaf bitmap
 *
 *	Allocates all blocks in the specified range regardless of
 *	any existing allocations in that range.  Returns the number
 *	of blocks allocated by the call.
 */
static swblk_t
blst_leaf_fill(blmeta_t *scan, swblk_t blk, int count)
{
	int n = blk & (BLIST_BMAP_RADIX - 1);
	swblk_t nblks;
	u_swblk_t mask, bitmap;

	mask = ((u_swblk_t)-1 << n) &
	       ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	/* Count the number of blocks we're about to allocate */
	bitmap = scan->u.bmu_bitmap & mask;
	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;

	scan->u.bmu_bitmap &= ~mask;
	return(nblks);
}
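
/*
 * The counting loop above is the standard Kernighan bit-count: each
 * bitmap &= bitmap - 1 clears the lowest set (free) bit.  Illustrative
 * example: if the mask covers bits 4-7 but only bits 5 and 7 are still
 * free, bitmap starts at 0xa0, the loop runs twice, and nblks = 2, so
 * only two blocks are newly charged against the free count.
 */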

/*
 * BLST_META_FILL() -	allocate specific blocks at a meta node
 *
 *	Allocates the specified range of blocks, regardless of
 *	any existing allocations in the range.  The range must
 *	be within the extent of this node.  Returns the number
 *	of blocks allocated by the call.
 */
static swblk_t
blst_meta_fill(blmeta_t *scan, swblk_t fillBlk, swblk_t count,
	       int64_t radix, int skip, swblk_t blk)
{
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);
	swblk_t nblks = 0;

	if (count == radix || scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case
		 */
		nblks = scan->u.bmu_avail;
		scan->u.bmu_avail = 0;
		scan->bm_bighint = count;
		return(nblks);
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize sublevel
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (swblk_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_swblk_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = (swblk_t)radix;
				scan[i].u.bmu_avail = (swblk_t)radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	if (count > (swblk_t)radix)
		panic("blst_meta_fill: allocation too large");

	i = (fillBlk - blk) / (swblk_t)radix;
	blk += i * (swblk_t)radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < fillBlk + count) {
		swblk_t v;

		v = blk + (swblk_t)radix - fillBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (swblk_t)-1)
			panic("blst_meta_fill: filling unexpected range");

		if (next_skip == 1) {
			nblks += blst_leaf_fill(&scan[i], fillBlk, v);
		} else {
			nblks += blst_meta_fill(&scan[i], fillBlk, v,
						radix, next_skip - 1, blk);
		}
		count -= v;
		fillBlk += v;
		blk += (swblk_t)radix;
		i += next_skip;
	}
	scan->u.bmu_avail -= nblks;
	return(nblks);
}

/*
 * BLST_COPY() -	copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the
 *	destination tree.  The space may not already be free in the
 *	destination.
 */
static void
blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix,
	  swblk_t skip, blist_t dest, swblk_t count)
{
	int next_skip;
	int i;

	/*
	 * Leaf node
	 */
	if (radix == BLIST_BMAP_RADIX) {
		u_swblk_t v = scan->u.bmu_bitmap;

		if (v == (u_swblk_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				if (v & (1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */
	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, (swblk_t)radix);
		return;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (swblk_t)-1)
			break;

		if (count >= (swblk_t)radix) {
			blst_copy(&scan[i], blk, radix,
				  next_skip - 1, dest, (swblk_t)radix);
			count -= (swblk_t)radix;
		} else {
			if (count) {
				blst_copy(&scan[i], blk, radix,
					  next_skip - 1, dest, count);
			}
			count = 0;
		}
		blk += (swblk_t)radix;
	}
}

/*
 * BLST_RADIX_INIT() -	initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */
static swblk_t
blst_radix_init(blmeta_t *scan, int64_t radix, int skip, swblk_t count)
{
	int i;
	int next_skip;
	swblk_t memindex = 0;

	/*
	 * Leaf node
	 */
	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */
	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= (swblk_t)radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    (swblk_t)radix
			);
			count -= (swblk_t)radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (swblk_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}

static void
blst_radix_print(blmeta_t *scan, swblk_t blk, int64_t radix, int skip,
		 int tab)
{
	int i;
	int next_skip;

	if (radix == BLIST_BMAP_RADIX) {
		kprintf(
		    "%*.*s(%04x,%lld): bitmap %08x big=%d\n",
		    tab, tab, "",
		    blk, (long long)radix,
		    scan->u.bmu_bitmap,
		    scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		kprintf(
		    "%*.*s(%04x,%lld) ALL ALLOCATED\n",
		    tab, tab, "",
		    blk, (long long)radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		kprintf(
		    "%*.*s(%04x,%lld) ALL FREE\n",
		    tab, tab, "",
		    blk, (long long)radix
		);
		return;
	}

	kprintf(
	    "%*.*s(%04x,%lld): subtree (%d/%lld) big=%d {\n",
	    tab, tab, "",
	    blk, (long long)radix,
	    scan->u.bmu_avail,
	    (long long)radix,
	    scan->bm_bighint
	);

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (swblk_t)-1) {
			kprintf(
			    "%*.*s(%04x,%lld): Terminator\n",
			    tab, tab, "",
			    blk, (long long)radix
			);
			break;
		}
		blst_radix_print(&scan[i], blk, radix, next_skip - 1, tab);
		blk += (swblk_t)radix;
	}
	tab -= 4;

	kprintf("%*.*s}\n", tab, tab, "");
}

#ifndef _KERNEL

/*
 * Stand-alone test/debug shell.  Commands are read from stdin:
 *	p        - print the radix tree
 *	a %d     - allocate count blocks ("a %d %d" allocates at/after blkat)
 *	f %x %d  - free count blocks at the given block
 *	l %x %d  - fill (force-allocate) count blocks at the given block
 *	r %d     - resize the blist
 *	q        - quit
 */
int
main(int ac, char **av)
{
	swblk_t size = 1024;
	int i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		swblk_t da = 0;
		swblk_t count = 0;
		swblk_t blkat;

		kprintf("%d/%d/%lld> ",
			bl->bl_free, size, (long long)bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%d", &count) == 1) {
				blist_resize(&bl, count, 1);
				size = count;
			} else {
				kprintf("?\n");
			}
			break;
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%d %d", &count, &blkat) == 1) {
				swblk_t blk = blist_alloc(bl, count);
				kprintf("    R=%04x\n", blk);
			} else if (sscanf(buf + 1, "%d %d", &count, &blkat) == 2) {
				swblk_t blk = blist_allocat(bl, count, blkat);
				kprintf("    R=%04x\n", blk);
			} else {
				kprintf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				kprintf("?\n");
			}
			break;
		case 'l':
			if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
				kprintf("    n=%d\n",
					blist_fill(bl, da, count));
			} else {
				kprintf("?\n");
			}
			break;
		case 'q':
			return(0);
		default:
			kprintf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	__va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	va_end(va);
	exit(1);
}

#endif