/*
 * ALIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting.
 *		Unlimited-size allocations, power-of-2 only, power-of-2
 *		aligned results only.
 *
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module has been adapted from the BLIST module, which was written
 * by Matthew Dillon many years ago.
 *
 * This module implements a general power-of-2 bitmap allocator/deallocator.
 * All allocations must be in powers of 2 and will return similarly aligned
 * results.  The module does not try to interpret the meaning of a 'block'
 * other than to return ALIST_BLOCK_NONE on an allocation failure.
 *
 * A maximum of 2 billion blocks is supported so, for example, if one block
 * represented 64 bytes a maximally sized ALIST would represent
 * 128 gigabytes.
 *
 * A radix tree is used to maintain the bitmap and is laid out in a manner
 * similar to the blist code.  Meta nodes use a radix of 16 and 2 bits per
 * block while leaf nodes use a radix of 32 and 1 bit per block (stored in
 * a 32 bit bitmap field).  Both meta and leaf nodes have a hint field.
 * This field gives us a hint as to the largest free contiguous range of
 * blocks under the node.  It may contain a value that is too high, but
 * will never contain a value that is too low.  When the radix tree is
 * searched, allocation failures in subtrees update the hint.
 *
 * The radix tree is laid out recursively using a linear array.  Each meta
 * node is immediately followed (laid out sequentially in memory) by
 * ALIST_META_RADIX lower-level nodes.  This is a recursive structure but
 * one that can be easily scanned through a very simple 'skip' calculation.
 * In order to support large radixes, portions of the tree may reside
 * outside our memory allocation.  We handle this with an early-terminate
 * optimization in the meta-node.  The memory allocation is only large
 * enough to cover the number of blocks requested at creation time, even
 * if it must be encompassed in a larger root-node radix.
 *
 * This code can be compiled stand-alone for debugging.
 */

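/*
 * Worked example of the layout described above (illustrative only; it
 * simply applies the radix/skip recurrence used by alist_create() with
 * ALIST_BMAP_RADIX = 32 and ALIST_META_RADIX = 16):
 *
 *      blocks = 1000
 *      radix = 32,   skip = 0                  (a single leaf is too small)
 *      radix = 512,  skip = 16                 (one meta level of 16 leaves)
 *      radix = 8192, skip = (16 + 1) * 16 = 272
 *
 * The root is therefore a meta node of radix 8192.  With
 * next_skip = skip / ALIST_META_RADIX = 17, child j of the root lives at
 * scan[1 + 17 * j], and each child subtree (radix 512, skip 16) occupies
 * 17 almeta_t slots: one meta node immediately followed by its 16 leaves.
 * Only enough of this array to cover the requested 1000 blocks is actually
 * allocated; the remaining children are cut off by terminator nodes.
 */
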
#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/alist.h>
#include <sys/malloc.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#else

#ifndef ALIST_NO_DEBUG
#define ALIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <assert.h>

#define kmalloc(a,b,c)  malloc(a)
#define kfree(a,b)      free(a)
#define kprintf         printf
#define KKASSERT(exp)   assert(exp)

#include <sys/alist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */
static alist_blk_t alst_leaf_alloc(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t start, alist_blk_t count);
static alist_blk_t alst_meta_alloc(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t start, alist_blk_t count,
                                alist_blk_t radix, alist_blk_t skip);
static void alst_leaf_free(almeta_t *scan, alist_blk_t relblk,
                                alist_blk_t count);
static void alst_meta_free(almeta_t *scan, alist_blk_t freeBlk,
                                alist_blk_t count, alist_blk_t radix,
                                alist_blk_t skip, alist_blk_t blk);
static alist_blk_t alst_radix_init(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t radix, alist_blk_t skip,
                                alist_blk_t count);
static void alst_radix_print(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t radix, alist_blk_t skip,
                                int tab);

/*
 * Create an alist capable of handling up to the specified number of blocks.
 * Blocks must be greater than 0 but do not have to be a power of 2.
 *
 * The smallest alist consists of a single leaf node capable of
 * managing ALIST_BMAP_RADIX blocks.
 */
alist_t
alist_create(alist_blk_t blocks, struct malloc_type *mtype)
{
        alist_t bl;
        alist_blk_t radix;
        alist_blk_t skip = 0;

        /*
         * Calculate radix and skip field used for scanning.
         */
        radix = ALIST_BMAP_RADIX;
        while (radix < blocks) {
                radix *= ALIST_META_RADIX;
                skip = (skip + 1) * ALIST_META_RADIX;
        }

        bl = kmalloc(sizeof(struct alist), mtype, M_WAITOK | M_ZERO);

        bl->bl_blocks = blocks;
        bl->bl_radix = radix;
        bl->bl_skip = skip;
        bl->bl_rootblks = 1 + alst_radix_init(NULL, 0, bl->bl_radix,
                                              bl->bl_skip, blocks);
        bl->bl_root = kmalloc(sizeof(almeta_t) * bl->bl_rootblks,
                              mtype, M_WAITOK);

#if defined(ALIST_DEBUG)
        kprintf(
                "ALIST representing %d blocks (%d MB of swap)"
                ", requiring %dK (%d bytes) of ram\n",
                bl->bl_blocks,
                bl->bl_blocks * 4 / 1024,
                (bl->bl_rootblks * sizeof(almeta_t) + 1023) / 1024,
                (bl->bl_rootblks * sizeof(almeta_t))
        );
        kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
        alst_radix_init(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, blocks);

        return(bl);
}

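/*
 * Typical usage, sketched after the stand-alone test code at the bottom of
 * this file (a kernel caller would pass a real malloc_type instead of
 * NULL).  A freshly created alist starts out fully allocated, so the
 * creator frees the range it wants to make available before handing out
 * blocks:
 *
 *      alist_t bl = alist_create(1024, NULL);
 *      alist_blk_t blk;
 *
 *      alist_free(bl, 0, 1024);
 *      blk = alist_alloc(bl, 0, 16);           16 blocks, 16-block aligned
 *      if (blk != ALIST_BLOCK_NONE)
 *              alist_free(bl, blk, 16);
 *      alist_destroy(bl, NULL);
 */
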
/*
 * Initialize an alist in caller-supplied storage, using a caller-supplied
 * array of almeta_t records rather than allocating one.
 */
void
alist_init(alist_t bl, alist_blk_t blocks,
           almeta_t *records, alist_blk_t nrecords)
{
        alist_blk_t radix;
        alist_blk_t skip = 0;

        /*
         * Calculate radix and skip field used for scanning.
         */
        radix = ALIST_BMAP_RADIX;
        while (radix < blocks) {
                radix *= ALIST_META_RADIX;
                skip = (skip + 1) * ALIST_META_RADIX;
        }
        bzero(bl, sizeof(*bl));

        bl->bl_blocks = blocks;
        bl->bl_radix = radix;
        bl->bl_skip = skip;
        bl->bl_rootblks = 1 + alst_radix_init(NULL, 0, bl->bl_radix,
                                              bl->bl_skip, blocks);
        KKASSERT(bl->bl_rootblks <= nrecords);
        bl->bl_root = records;

#if defined(ALIST_DEBUG)
        kprintf(
                "ALIST representing %d blocks (%d MB of swap)"
                ", requiring %dK (%d bytes) of ram\n",
                bl->bl_blocks,
                bl->bl_blocks * 4 / 1024,
                (bl->bl_rootblks * sizeof(almeta_t) + 1023) / 1024,
                (bl->bl_rootblks * sizeof(almeta_t))
        );
        kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
        alst_radix_init(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, blocks);
}

void
alist_destroy(alist_t bl, struct malloc_type *mtype)
{
        kfree(bl->bl_root, mtype);
        kfree(bl, mtype);
}

/*
 * Reserve space in the block bitmap.  Return the base of a contiguous
 * region or ALIST_BLOCK_NONE if space could not be allocated.
 *
 * This nominally allocates a power-of-2 number of blocks.  However,
 * non-powers of 2 are accepted and implemented by first allocating
 * the nearest power of 2 and then freeing the remainder.
 */
alist_blk_t
alist_alloc(alist_t bl, alist_blk_t start, alist_blk_t count)
{
        alist_blk_t blk = ALIST_BLOCK_NONE;

        /*
         * Check non power-of-2
         */
        if ((count | (count - 1)) != (count << 1) - 1) {
                alist_blk_t ncount = (count < 256) ? 1 : 256;
                while (ncount < count)
                        ncount <<= 1;
                blk = alist_alloc(bl, start, ncount);
                if (blk != ALIST_BLOCK_NONE)
                        alist_free(bl, blk + count, ncount - count);
                return(blk);
        }

        /*
         * Power of 2
         */
        if (bl && count < bl->bl_radix) {
                if (bl->bl_radix == ALIST_BMAP_RADIX) {
                        blk = alst_leaf_alloc(bl->bl_root, 0, start, count);
                } else {
                        blk = alst_meta_alloc(bl->bl_root, 0, start, count,
                                              bl->bl_radix, bl->bl_skip);
                }
                if (blk != ALIST_BLOCK_NONE)
                        bl->bl_free -= count;
        }
        return(blk);
}

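/*
 * Example of the non-power-of-2 path above (illustrative): a request for
 * count = 5 rounds ncount up to 8, allocates an 8-block aligned run, and
 * immediately frees the surplus:
 *
 *      blk = alist_alloc(bl, 0, 5);
 *              ncount = 8
 *              blk = alist_alloc(bl, 0, 8)
 *              alist_free(bl, blk + 5, 8 - 5)
 *
 * The caller still gets an 8-block aligned base, but only 5 blocks remain
 * reserved.
 */
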
/*
 * Free up space in the block bitmap.  The starting block and count do not
 * need to be power-of-2 aligned.  The related blocks must be in an allocated
 * state.
 */
void
alist_free(alist_t bl, alist_blk_t blkno, alist_blk_t count)
{
        KKASSERT(blkno + count <= bl->bl_blocks);
        if (bl->bl_radix == ALIST_BMAP_RADIX) {
                alst_leaf_free(bl->bl_root, blkno, count);
        } else {
                alst_meta_free(bl->bl_root, blkno, count,
                               bl->bl_radix, bl->bl_skip, 0);
        }
        bl->bl_free += count;
}

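/*
 * Unlike allocations, frees need not be power-of-2 sized or aligned.  For
 * example (illustrative), part of an earlier allocation can be returned:
 *
 *      blk = alist_alloc(bl, 0, 32);
 *      alist_free(bl, blk + 10, 7);
 *
 * The 7 freed blocks become allocatable again while the other 25 blocks
 * of the original run stay reserved.
 */
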
/*
 * Returns the current total number of free blocks and the
 * approximate trailing largest contiguous free block available.
 */
alist_blk_t
alist_free_info(alist_t bl, alist_blk_t *startp, alist_blk_t *countp)
{
        alist_blk_t radix = bl->bl_radix;
        alist_blk_t skip = bl->bl_skip;
        alist_blk_t next_skip;
        alist_blk_t i;
        alist_bmap_t mask;
        almeta_t *scan = bl->bl_root;

        *startp = 0;
        *countp = 0;

        while (radix != ALIST_BMAP_RADIX) {
                radix /= ALIST_META_RADIX;
                next_skip = skip / ALIST_META_RADIX;

                /*
                 * Find the biggest fully allocated chunk.
                 */
                for (i = ALIST_META_RADIX - 1; i != ALIST_BLOCK_NONE; --i) {
                        mask = (scan->bm_bitmap >> (i * 2)) & 3;
                        if (mask == 0) {
                                /*
                                 * All allocated, continue the loop
                                 */
                                continue;
                        }
                        if (mask == 1) {
                                /*
                                 * Partially allocated, push into this guy
                                 */
                                break;
                        }
                        /*
                         * All free, we can return the chunk.
                         */
                        *startp += i * radix;
                        *countp = radix;
                        return(bl->bl_free);
                }

                /*
                 * If we failed to find anything stop here, otherwise push
                 * down into the partially allocated chunk.
                 */
                if (i == ALIST_BLOCK_NONE)
                        break;
                *startp += i * radix;
                scan += 1 + next_skip * i;
                skip = next_skip - 1;
        }

        /*
         * If we got all the way down to a leaf node locate the last block,
         * power-of-2 aligned and power-of-2 sized.  Well, the easiest way
         * to deal with this is to just return 1 block.
         */
        if (radix == ALIST_BMAP_RADIX) {
                mask = scan->bm_bitmap;
                for (i = ALIST_BMAP_RADIX - 1; i != ALIST_BLOCK_NONE; --i) {
                        if ((mask & ((alist_bmap_t)1U << i)))
                                break;
                }

                /*
                 * did not find free entry
                 */
                if (i != ALIST_BLOCK_NONE) {
                        *startp += i;
                        *countp = 1;
                }
        }
        return(bl->bl_free);
}

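/*
 * Example use (illustrative; this is how the stand-alone test code below
 * reports its 'info' command):
 *
 *      alist_blk_t base, count, bfree;
 *
 *      bfree = alist_free_info(bl, &base, &count);
 *      kprintf("info: %d free trailing: 0x%04x/%d\n", bfree, base, count);
 *
 * bfree is the total number of free blocks, while base/count describe an
 * approximate trailing free region, possibly just a single block when the
 * scan ends inside a leaf.
 */
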
/*
 * alist_print()    - dump radix tree
 */
void
alist_print(alist_t bl)
{
        kprintf("ALIST {\n");
        alst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
        kprintf("}\n");
}

/************************************************************************
 *                      ALLOCATION SUPPORT FUNCTIONS                   *
 ************************************************************************
 *
 *      These support functions do all the actual work.  They may seem
 *      rather longish, but that's because I've commented them up.  The
 *      actual code is straightforward.
 */

/*
 * alst_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap).
 *
 *      This is the core of the allocator and is optimized for the 1 block
 *      and the ALIST_BMAP_RADIX block allocation cases.  Other cases are
 *      somewhat slower.  The 1 block allocation case is log2 and extremely
 *      quick.
 *
 *      mask bit is 0   allocated, not available
 *      mask bit is 1   free, available for allocation
 */
static alist_blk_t
alst_leaf_alloc(almeta_t *scan, alist_blk_t blk, alist_blk_t start,
                alist_blk_t count)
{
        alist_bmap_t orig = scan->bm_bitmap;

        /*
         * Allocate only beyond the start point.  Mask to 0 the low bits
         * below start.  If start == blk no bits get cleared so don't
         * bother.
         */
        if (start >= blk + ALIST_BMAP_RADIX)
                return(ALIST_BLOCK_NONE);

        if (start > blk && start < blk + ALIST_BMAP_RADIX)
                orig &= ~(((alist_bmap_t)1U << (start - blk)) - 1);

        start &= ALIST_BMAP_RADIX - 1;

        /*
         * Optimize bitmap all-allocated case.  Also, count = 1
         * case assumes at least 1 bit is free in the bitmap, so
         * we have to take care of this case here.
         */
        if (orig == 0) {
                scan->bm_bighint = 0;
                return(ALIST_BLOCK_NONE);
        }

        /*
         * Optimized code to allocate one bit out of the bitmap
         */
        if (count == 1) {
                alist_bmap_t mask;
                alist_blk_t j = ALIST_BMAP_RADIX/2;
                alist_blk_t r = 0;

                mask = (alist_bmap_t)-1 >> (ALIST_BMAP_RADIX/2);

                while (j) {
                        if ((orig & mask) == 0) {
                                r += j;
                                orig >>= j;
                        }
                        j >>= 1;
                        mask >>= j;
                }
                scan->bm_bitmap &= ~(1 << r);
                return(blk + r);
        }

        /*
         * non-optimized code to allocate N bits out of the bitmap.
         * The more bits, the faster the code runs.  It will run
         * the slowest allocating 2 bits, but since there aren't any
         * memory ops in the core loop (or shouldn't be, anyway),
         * you probably won't notice the difference.
         *
         * Similar to the blist case, the alist code also requires
         * allocations to be power-of-2 sized and aligned to the
         * size of the allocation, which simplifies the algorithm.
         */
        {
                alist_blk_t j;
                alist_blk_t n = ALIST_BMAP_RADIX - count;
                alist_bmap_t mask;

                mask = (alist_bmap_t)-1 >> n;

                for (j = 0; j <= n; j += count) {
                        if ((orig & mask) == mask) {
                                scan->bm_bitmap &= ~mask;
                                return(blk + j);
                        }
                        mask = mask << count;
                }
        }

        /*
         * We couldn't allocate count in this subtree, update bighint
         * if we were able to check the entire node.
         */
        if (start <= blk)
                scan->bm_bighint = count - 1;
        return(ALIST_BLOCK_NONE);
}

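/*
 * Worked example of the N-bit scan above (illustrative): for count = 4
 * the scan builds a 4-bit window and slides it in count-aligned steps:
 *
 *      n    = ALIST_BMAP_RADIX - count = 28
 *      mask = (alist_bmap_t)-1 >> 28   = 0x0000000f
 *
 *      j =  0: test bits  3..0
 *      j =  4: test bits  7..4        (mask <<= 4)
 *      j =  8: test bits 11..8
 *      ...
 *      j = 28: test bits 31..28
 *
 * The first window whose bits are all 1 (all free) is claimed by clearing
 * those bits in bm_bitmap and blk + j is returned.  Because the window
 * advances by count each step, the result is automatically count-aligned.
 */
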
/*
 * Attempt to allocate at a meta node.  If we can't, we update
 * bighint and return a failure.  Updating bighint optimizes future
 * calls that hit this node.  We have to check for our collapse cases
 * and we have a few optimizations strewn in as well.
 */
static alist_blk_t
alst_meta_alloc(almeta_t *scan, alist_blk_t blk, alist_blk_t start,
                alist_blk_t count, alist_blk_t radix, alist_blk_t skip)
{
        alist_blk_t i;
        alist_bmap_t mask;
        alist_bmap_t pmask;
        alist_blk_t r;
        alist_blk_t next_skip = ((u_int)skip / ALIST_META_RADIX);
        alist_blk_t orig_blk;

        /*
         * ALL-ALLOCATED special case
         */
        if (scan->bm_bitmap == 0) {
                scan->bm_bighint = 0;
                return(ALIST_BLOCK_NONE);
        }

        radix /= ALIST_META_RADIX;

        /*
         * Radix now represents each bitmap entry for this meta node.  If
         * the number of blocks being allocated can be fully represented,
         * we allocate directly out of this meta node.
         *
         * Meta node bitmaps use 2 bits per block.
         *
         *      00      ALL-ALLOCATED
         *      01      PARTIALLY-FREE/PARTIALLY-ALLOCATED
         *      11      ALL-FREE
         */
        if (count >= radix) {
                alist_blk_t n = count / radix * 2;      /* number of bits */
                alist_blk_t j;

                mask = (alist_bmap_t)-1 >> (ALIST_BMAP_RADIX - n);
                for (j = 0; j < ALIST_META_RADIX; j += n / 2) {
                        if ((scan->bm_bitmap & mask) == mask &&
                            blk + j * radix >= start) {
                                scan->bm_bitmap &= ~mask;
                                return(blk + j * radix);
                        }
                        mask <<= n;
                }
                if (scan->bm_bighint >= count && start <= blk)
                        scan->bm_bighint = count >> 1;
                return(ALIST_BLOCK_NONE);
        }

        /*
         * If not we have to recurse.
         */
        mask = 0x00000003;
        pmask = 0x00000001;
        orig_blk = blk;
        for (i = 1; i <= skip; i += next_skip) {
                if (scan[i].bm_bighint == (alist_blk_t)-1) {
                        /*
                         * Terminator
                         */
                        break;
                }

                /*
                 * If the element is marked completely free (11), initialize
                 * the recursion.
                 */
                if ((scan->bm_bitmap & mask) == mask) {
                        scan[i].bm_bitmap = (alist_bmap_t)-1;
                        scan[i].bm_bighint = radix;
                }

                if ((scan->bm_bitmap & mask) == 0) {
                        /*
                         * Object marked completely allocated, recursion
                         * contains garbage.
                         */
                } else if (blk + radix <= start) {
                        /*
                         * Object does not contain or is not beyond our
                         * start point.
                         */
                } else if (count <= scan[i].bm_bighint) {
                        /*
                         * count fits in object.  If successful and the
                         * deeper level becomes all allocated, mark our
                         * level as all-allocated.
                         */
                        if (next_skip == 1) {
                                r = alst_leaf_alloc(&scan[i], blk, start,
                                                    count);
                        } else {
                                r = alst_meta_alloc(&scan[i], blk, start,
                                                    count,
                                                    radix, next_skip - 1);
                        }
                        if (r != ALIST_BLOCK_NONE) {
                                if (scan[i].bm_bitmap == 0) {
                                        scan->bm_bitmap &= ~mask;
                                } else {
                                        scan->bm_bitmap &= ~mask;
                                        scan->bm_bitmap |= pmask;
                                }
                                return(r);
                        }
                }
                mask <<= 2;
                pmask <<= 2;
                blk += radix;
        }

        /*
         * We couldn't allocate count in this subtree, update bighint
         * if we were able to check the entire node.
         */
        if (scan->bm_bighint >= count && start <= orig_blk)
                scan->bm_bighint = count >> 1;
        return(ALIST_BLOCK_NONE);
}

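/*
 * Worked example of the direct (count >= radix) case above (illustrative):
 * with a per-child radix of 512 and count = 1024, two children are needed,
 * so
 *
 *      n    = count / radix * 2 = 4 bits
 *      mask = (alist_bmap_t)-1 >> (32 - 4) = 0x0000000f
 *
 * and the loop tests child pairs {0,1}, {2,3}, {4,5}, ... since j advances
 * by n / 2 = 2.  A window in which all four bits read 11 (both children
 * ALL-FREE) is claimed by clearing it to 00 (ALL-ALLOCATED), and
 * blk + j * radix is returned.
 */
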
/*
 * Free allocated block from leaf bitmap
 */
static void
alst_leaf_free(almeta_t *scan, alist_blk_t blk, alist_blk_t count)
{
        /*
         * free some data in this bitmap
         *
         * e.g. 0000111111111110000
         */
        alist_blk_t n = blk & (ALIST_BMAP_RADIX - 1);
        alist_bmap_t mask;

        mask = ((alist_bmap_t)-1 << n) &
               ((alist_bmap_t)-1 >> (ALIST_BMAP_RADIX - count - n));

        if (scan->bm_bitmap & mask)
                panic("alst_radix_free: freeing free block");
        scan->bm_bitmap |= mask;

        /*
         * We could probably do a better job here.  We are required to make
         * bighint at least as large as the biggest contiguous block of
         * data.  If we just shoehorn it, a little extra overhead will
         * be incurred on the next allocation (but only that one typically).
         */
        scan->bm_bighint = ALIST_BMAP_RADIX;
}

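/*
 * Worked example of the mask construction above (illustrative): freeing
 * count = 11 blocks starting at bit n = 4 within this leaf gives
 *
 *      mask = (0xffffffff << 4) & (0xffffffff >> (32 - 11 - 4))
 *           = 0xfffffff0 & 0x00007fff
 *           = 0x00007ff0
 *
 * i.e. bits 4..14 set, matching the picture in the comment above.  Those
 * bits must currently be 0 (allocated); they are turned back on to mark
 * the blocks free.
 */
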
/*
 * Free allocated blocks from radix tree meta info.  This support routine
 * frees a range of blocks from the bitmap.  The range must be entirely
 * enclosed by this radix node.  If a meta node, we break the range down
 * recursively to free blocks in subnodes (which means that this code
 * can free an arbitrary range whereas the allocation code cannot allocate
 * an arbitrary range).
 */
static void
alst_meta_free(almeta_t *scan, alist_blk_t freeBlk, alist_blk_t count,
               alist_blk_t radix, alist_blk_t skip, alist_blk_t blk)
{
        alist_blk_t next_skip = ((u_int)skip / ALIST_META_RADIX);
        alist_bmap_t mask;
        alist_bmap_t pmask;
        alist_blk_t i;
        alist_blk_t v;

        /*
         * Break the free down into its components.  Because it is so easy
         * to implement, frees are not limited to power-of-2 sizes.
         *
         * Each block in a meta-node bitmap takes two bits.
         */
        radix /= ALIST_META_RADIX;

        i = (freeBlk - blk) / radix;
        blk += i * radix;
        mask = 0x00000003 << (i * 2);
        pmask = 0x00000001 << (i * 2);

        i = i * next_skip + 1;

        while (i <= skip && blk < freeBlk + count) {
                /*
                 * The number of blocks being freed in this sub-node,
                 * clipped to the range being freed.
                 */
                v = blk + radix - freeBlk;
                if (v > count)
                        v = count;

                if (scan->bm_bighint == (alist_blk_t)-1)
                        panic("alst_meta_free: freeing unexpected range");

                /*
                 * WARNING on bighint updates.  When we free an element in
                 * a chunk if the chunk becomes wholly free it is possible
                 * that the whole node is now free, so bighint must be set
                 * to cover the whole node.  Otherwise address-specific
                 * allocations may fail.
                 *
                 * We don't bother figuring out how much of the node is
                 * actually free in this case.
                 */
                if (freeBlk == blk && count >= radix) {
                        /*
                         * The area being freed covers the entire block,
                         * assert that we are marked all-allocated and
                         * then mark it all-free.
                         */
                        KKASSERT((scan->bm_bitmap & mask) == 0);
                        scan->bm_bitmap |= mask;
                        scan->bm_bighint = radix * ALIST_META_RADIX;
                } else {
                        /*
                         * If we were previously marked all-allocated, fix-up
                         * the next layer so we can recurse down into it.
                         */
                        if ((scan->bm_bitmap & mask) == 0) {
                                scan[i].bm_bitmap = (alist_bmap_t)0;
                                scan[i].bm_bighint = 0;
                        }

                        /*
                         * Recursion case, then either mark all-free or
                         * partially free.
                         */
                        if (next_skip == 1) {
                                alst_leaf_free(&scan[i], freeBlk, v);
                        } else {
                                alst_meta_free(&scan[i], freeBlk, v,
                                               radix, next_skip - 1, blk);
                        }
                        if (scan[i].bm_bitmap == (alist_bmap_t)-1) {
                                scan->bm_bitmap |= mask;
                                scan->bm_bighint = radix * ALIST_META_RADIX;
                        } else {
                                scan->bm_bitmap &= ~mask;
                                scan->bm_bitmap |= pmask;
                                if (scan->bm_bighint < scan[i].bm_bighint)
                                        scan->bm_bighint = scan[i].bm_bighint;
                        }
                }
                mask <<= 2;
                pmask <<= 2;
                count -= v;
                freeBlk += v;
                blk += radix;
                i += next_skip;
        }
}

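/*
 * Worked example of the range splitting above (illustrative): with a
 * per-child radix of 32, freeing freeBlk = 40, count = 50 starts at child
 * i = (40 - 0) / 32 = 1, whose blocks are 32-63:
 *
 *      pass 1:  blk = 32,  v = 32 + 32 - 40 = 24       (blocks 40-63)
 *      pass 2:  blk = 64,  v = min(32, 26)  = 26       (blocks 64-89)
 *
 * Each pass hands its clipped sub-range to the child node, which is why an
 * arbitrary range can be freed even though allocations are power-of-2 only.
 */
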
/*
 * Initialize our meta structures and bitmaps and calculate the exact
 * amount of space required to manage 'count' blocks - this space may
 * be considerably less than the calculated radix due to the large
 * RADIX values we use.
 */
static alist_blk_t
alst_radix_init(almeta_t *scan, alist_blk_t blk, alist_blk_t radix,
                alist_blk_t skip, alist_blk_t count)
{
        alist_blk_t i;
        alist_blk_t next_skip;
        alist_blk_t memindex = 0;

        /*
         * Leaf node
         */
        if (radix == ALIST_BMAP_RADIX) {
                if (scan) {
                        scan->bm_bighint = 0;
                        scan->bm_bitmap = 0;
                }
                return(memindex);
        }

        /*
         * Meta node.  If allocating the entire object we can special
         * case it.  However, we need to figure out how much memory
         * is required to manage 'count' blocks, so we continue on anyway.
         */
        if (scan) {
                scan->bm_bighint = 0;
                scan->bm_bitmap = 0;
        }

        radix /= ALIST_META_RADIX;
        next_skip = skip / ALIST_META_RADIX;

        for (i = 1; i <= skip; i += next_skip) {
                if (count >= blk + radix) {
                        /*
                         * Allocate the entire object
                         */
                        memindex += alst_radix_init(((scan) ? &scan[i] : NULL),
                                                    blk, radix,
                                                    next_skip - 1, count);
                        /* already marked as wholly allocated */
                } else if (count > blk) {
                        /*
                         * Allocate a partial object, well it's really
                         * all-allocated, we just aren't allowed to
                         * free the whole thing.
                         */
                        memindex += alst_radix_init(((scan) ? &scan[i] : NULL),
                                                    blk, radix,
                                                    next_skip - 1, count);
                        /* already marked as wholly allocated */
                } else {
                        /*
                         * Add terminator but continue the loop.  Populate
                         * all the terminators.
                         */
                        if (scan) {
                                scan[i].bm_bighint = (alist_blk_t)-1;
                                scan[i].bm_bitmap = 0;
                        }
                        /* already marked as wholly allocated */
                }
                blk += radix;
        }
        memindex += ALIST_META_RADIX;

        return(memindex);
}

/*
 * Dump one level of the radix tree for debugging.
 */
static void
alst_radix_print(almeta_t *scan, alist_blk_t blk, alist_blk_t radix,
                 alist_blk_t skip, int tab)
{
        alist_blk_t i;
        alist_blk_t next_skip;
        alist_bmap_t mask;

        if (radix == ALIST_BMAP_RADIX) {
                kprintf(
                    "%*.*s(%04x,%d): bitmap %08x big=%d\n",
                    tab, tab, "",
                    blk, radix,
                    scan->bm_bitmap,
                    scan->bm_bighint
                );
                return;
        }

        if (scan->bm_bitmap == 0) {
                kprintf(
                    "%*.*s(%04x,%d) ALL ALLOCATED\n",
                    tab, tab, "",
                    blk, radix
                );
                return;
        }
        if (scan->bm_bitmap == (alist_bmap_t)-1) {
                kprintf(
                    "%*.*s(%04x,%d) ALL FREE\n",
                    tab, tab, "",
                    blk, radix
                );
                return;
        }

        kprintf(
            "%*.*s(%04x,%d): subtree (%d) bitmap=%08x big=%d {\n",
            tab, tab, "",
            blk, radix,
            radix,
            scan->bm_bitmap,
            scan->bm_bighint
        );

        radix /= ALIST_META_RADIX;
        next_skip = skip / ALIST_META_RADIX;
        tab += 4;
        mask = 0x00000003;

        for (i = 1; i <= skip; i += next_skip) {
                if (scan[i].bm_bighint == (alist_blk_t)-1) {
                        kprintf(
                            "%*.*s(%04x,%d): Terminator\n",
                            tab, tab, "",
                            blk, radix
                        );
                        break;
                }
                if ((scan->bm_bitmap & mask) == mask) {
                        kprintf(
                            "%*.*s(%04x,%d): ALL FREE\n",
                            tab, tab, "",
                            blk, radix
                        );
                } else if ((scan->bm_bitmap & mask) == 0) {
                        kprintf(
                            "%*.*s(%04x,%d): ALL ALLOCATED\n",
                            tab, tab, "",
                            blk, radix
                        );
                } else {
                        alst_radix_print(&scan[i], blk, radix,
                                         next_skip - 1, tab);
                }
                blk += radix;
                mask <<= 2;
        }
        tab -= 4;

        kprintf("%*.*s}\n", tab, tab, "");
}

#ifdef ALIST_DEBUG

int
main(int ac, char **av)
{
        alist_blk_t size = 1024;
        alist_blk_t da = 0;
        alist_blk_t count = 0;
        alist_t bl;
        int i;

        for (i = 1; i < ac; ++i) {
                const char *ptr = av[i];
                if (*ptr != '-') {
                        size = strtol(ptr, NULL, 0);
                        continue;
                }
                ptr += 2;
                fprintf(stderr, "Bad option: %s\n", ptr - 2);
                exit(1);
        }
        bl = alist_create(size, NULL);
        alist_free(bl, 0, size);

        for (;;) {
                char buf[1024];
                alist_blk_t bfree;

                kprintf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix);
                fflush(stdout);
                if (fgets(buf, sizeof(buf), stdin) == NULL)
                        break;
                switch(buf[0]) {
                case 'p':
                        alist_print(bl);
                        break;
                case 'a':
                        if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
                                da = alist_alloc(bl, da, count);
                                kprintf("    R=%04x\n", da);
                        } else if (sscanf(buf + 1, "%d", &count) == 1) {
                                da = alist_alloc(bl, 0, count);
                                kprintf("    R=%04x\n", da);
                        } else {
                                /* repeat the previous request verbosely */
                                kprintf("alloc 0x%04x/%d\n", da, count);
                                alist_blk_t blk = alist_alloc(bl, da, count);
                                kprintf("    R=%04x\n", blk);
                        }
                        break;
                case 'f':
                        if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
                                alist_free(bl, da, count);
                        } else {
                                /* repeat the previous request verbosely */
                                kprintf("free 0x%04x/%d\n", da, count);
                                alist_free(bl, da, count);
                        }
                        break;
                case 'i':
                        bfree = alist_free_info(bl, &da, &count);
                        kprintf("info: %d free trailing: 0x%04x/%d\n",
                                bfree, da, count);
                        break;
                default:
                        kprintf("?\n");
                        break;
                }
        }
        return(0);
}

void
panic(const char *ctl, ...)
{
        __va_list va;

        __va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        fprintf(stderr, "\n");
        va_end(va);
        exit(1);
}

#endif