/*
 * ALIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting.
 *		Unlimited-size allocations, power-of-2 only, power-of-2
 *		aligned results only.
 *
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module has been adapted from the BLIST module, which was written
 * by Matthew Dillon many years ago.
 *
 * This module implements a general power-of-2 bitmap allocator/deallocator.
 * All allocations must be in powers of 2 and will return similarly aligned
 * results.  The module does not try to interpret the meaning of a 'block'
 * other than to return ALIST_BLOCK_NONE on an allocation failure.
 *
 * A maximum of 2 billion blocks is supported so, for example, if one block
 * represented 64 bytes a maximally sized ALIST would represent
 * 128 gigabytes.
 *
 * A radix tree is used to maintain the bitmap and is laid out in a manner
 * similar to the blist code.  Meta nodes use a radix of 16 and 2 bits per
 * block while leaf nodes use a radix of 32 and 1 bit per block (stored in
 * a 32 bit bitmap field).  Both meta and leaf nodes have a hint field.
 * This field gives us a hint as to the largest free contiguous range of
 * blocks under the node.  It may contain a value that is too high, but
 * will never contain a value that is too low.  When the radix tree is
 * searched, allocation failures in subtrees update the hint.
 *
 * The radix tree is laid out recursively using a linear array.  Each meta
 * node is immediately followed (laid out sequentially in memory) by
 * ALIST_META_RADIX lower level nodes.  This is a recursive structure but one
 * that can be easily scanned through a very simple 'skip' calculation.  In
 * order to support large radixes, portions of the tree may reside outside
 * our memory allocation.  We handle this with an early-terminate
 * optimization in the meta-node.  The memory allocation is only large
 * enough to cover the number of blocks requested at creation time even if
 * it must be encompassed in a larger root-node radix.
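 *
 * As a worked illustration of the arithmetic above (not additional API):
 * asking for 1024 blocks yields a root radix of 32*16*16 = 8192 and a
 * root skip of (0+1)*16 = 16 after the first pass, then (16+1)*16 = 272
 * after the second.  The root's sixteen children then sit at linear
 * offsets 1 + 17*n for n = 0..15 (next_skip = 272/16 = 17), each
 * covering 512 blocks.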
 *
 * This code can be compiled stand-alone for debugging.
 */
#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/alist.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#else

#ifndef ALIST_NO_DEBUG
#define ALIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>

#define kmalloc(a,b,c)	malloc(a)
#define kfree(a,b)	free(a)
#define kprintf		printf
#define KKASSERT(exp)	assert(exp)
struct malloc_type;

#include <sys/alist.h>

void panic(const char *ctl, ...);

#endif
/*
 * static support functions
 */
static alist_blk_t alst_leaf_alloc(almeta_t *scan, alist_blk_t blk,
				alist_blk_t start, alist_blk_t count);
static alist_blk_t alst_meta_alloc(almeta_t *scan, alist_blk_t blk,
				alist_blk_t start, alist_blk_t count,
				alist_blk_t radix, alist_blk_t skip);
static void alst_leaf_free(almeta_t *scan, alist_blk_t relblk,
				alist_blk_t count);
static void alst_meta_free(almeta_t *scan, alist_blk_t freeBlk,
				alist_blk_t count, alist_blk_t radix,
				alist_blk_t skip, alist_blk_t blk);
static alist_blk_t alst_radix_init(almeta_t *scan, alist_blk_t blk,
				alist_blk_t radix, alist_blk_t skip,
				alist_blk_t count);
#ifndef _KERNEL
static void alst_radix_print(almeta_t *scan, alist_blk_t blk,
				alist_blk_t radix, alist_blk_t skip,
				int tab);
#endif
/*
 * Create an alist capable of handling up to the specified number of blocks.
 * The block count must be greater than 0 but does not have to be a power
 * of 2.
 *
 * The smallest alist consists of a single leaf node capable of
 * managing ALIST_BMAP_RADIX blocks.
 */
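/*
 * A minimal usage sketch, mirroring the stand-alone test harness at the
 * bottom of this file (the tree starts fully allocated, so the creator
 * frees the managed range before handing out blocks):
 *
 *	alist_t bl = alist_create(1024, NULL);
 *	alist_blk_t blk;
 *
 *	alist_free(bl, 0, 1024);		make all 1024 blocks available
 *	blk = alist_alloc(bl, 0, 16);		16 contiguous, aligned blocks
 *	if (blk != ALIST_BLOCK_NONE)
 *		alist_free(bl, blk, 16);
 *	alist_destroy(bl, NULL);
 */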
alist_t
alist_create(alist_blk_t blocks, struct malloc_type *mtype)
{
	alist_t bl;
	alist_blk_t radix;
	alist_blk_t skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = ALIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= ALIST_META_RADIX;
		skip = (skip + 1) * ALIST_META_RADIX;
	}

	bl = kmalloc(sizeof(struct alist), mtype, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 + alst_radix_init(NULL, 0, bl->bl_radix,
					      bl->bl_skip, blocks);
	bl->bl_root = kmalloc(sizeof(almeta_t) * bl->bl_rootblks,
			      mtype, M_WAITOK);

#if defined(ALIST_DEBUG)
	kprintf(
		"ALIST representing %d blocks (%d MB of swap)"
		", requiring %dK (%d bytes) of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(almeta_t) + 1023) / 1024,
		(bl->bl_rootblks * sizeof(almeta_t))
	);
	kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
	alst_radix_init(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}
void
alist_init(alist_t bl, alist_blk_t blocks,
	   almeta_t *records, alist_blk_t nrecords)
{
	alist_blk_t radix;
	alist_blk_t skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = ALIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= ALIST_META_RADIX;
		skip = (skip + 1) * ALIST_META_RADIX;
	}
	bzero(bl, sizeof(*bl));

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 + alst_radix_init(NULL, 0, bl->bl_radix,
					      bl->bl_skip, blocks);
	KKASSERT(bl->bl_rootblks <= nrecords);
	bl->bl_root = records;

#if defined(ALIST_DEBUG)
	kprintf(
		"ALIST representing %d blocks (%d MB of swap)"
		", requiring %dK (%d bytes) of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(almeta_t) + 1023) / 1024,
		(bl->bl_rootblks * sizeof(almeta_t))
	);
	kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
	alst_radix_init(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, blocks);
}

void
alist_destroy(alist_t bl, struct malloc_type *mtype)
{
	kfree(bl->bl_root, mtype);
	kfree(bl, mtype);
}
/*
 * Reserve space in the block bitmap.  Return the base of a contiguous
 * region or ALIST_BLOCK_NONE if space could not be allocated.
 *
 * This nominally allocates a power-of-2 number of blocks.  However,
 * non-powers of 2 are accepted and implemented by first allocating
 * the nearest power of 2 and then freeing the remainder.
 */
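/*
 * Worked example of the round-up path (illustration only): a request for
 * count = 24 is not a power of 2, so 32 blocks are allocated (32-aligned)
 * and the trailing 8 blocks [blk+24, blk+32) are freed again, leaving the
 * caller a 24-block region at blk.  The power-of-2 test below relies on
 * (count | (count - 1)) == 2*count - 1 holding only for powers of 2.
 */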
alist_blk_t
alist_alloc(alist_t bl, alist_blk_t start, alist_blk_t count)
{
	alist_blk_t blk = ALIST_BLOCK_NONE;

	/*
	 * Check non power-of-2
	 */
	KKASSERT(count);
	if ((count | (count - 1)) != (count << 1) - 1) {
		alist_blk_t ncount = (count < 256) ? 1 : 256;

		while (ncount < count)
			ncount <<= 1;
		blk = alist_alloc(bl, start, ncount);
		if (blk != ALIST_BLOCK_NONE)
			alist_free(bl, blk + count, ncount - count);
		return (blk);
	}

	/*
	 * Power of 2
	 */
	if (bl && count < bl->bl_radix) {
		if (bl->bl_radix == ALIST_BMAP_RADIX) {
			blk = alst_leaf_alloc(bl->bl_root, 0, start, count);
		} else {
			blk = alst_meta_alloc(bl->bl_root, 0, start, count,
					      bl->bl_radix, bl->bl_skip);
		}
		if (blk != ALIST_BLOCK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}
/*
 * Free up space in the block bitmap.  The starting block and count do not
 * need to be power-of-2 aligned.  The related blocks must be in an
 * allocated state.
 */
void
alist_free(alist_t bl, alist_blk_t blkno, alist_blk_t count)
{
	if (bl) {
		KKASSERT(blkno + count <= bl->bl_blocks);
		if (bl->bl_radix == ALIST_BMAP_RADIX) {
			alst_leaf_free(bl->bl_root, blkno, count);
		} else {
			alst_meta_free(bl->bl_root, blkno, count,
				       bl->bl_radix, bl->bl_skip, 0);
		}
		bl->bl_free += count;
	}
}
/*
 * Returns the current total number of free blocks and the
 * approximate trailing largest contiguous free block available.
 */
alist_blk_t
alist_free_info(alist_t bl, alist_blk_t *startp, alist_blk_t *countp)
{
	alist_blk_t radix = bl->bl_radix;
	alist_blk_t skip = bl->bl_skip;
	alist_blk_t next_skip;
	alist_blk_t i;
	alist_bmap_t mask;
	almeta_t *scan = bl->bl_root;

	*startp = 0;
	*countp = 0;

	while (radix != ALIST_BMAP_RADIX) {
		radix /= ALIST_META_RADIX;
		next_skip = skip / ALIST_META_RADIX;

		/*
		 * Find the biggest fully allocated chunk.
		 */
		for (i = ALIST_META_RADIX - 1; i != ALIST_BLOCK_NONE; --i) {
			mask = (scan->bm_bitmap >> (i * 2)) & 3;
			if (mask == 0) {
				/*
				 * All allocated, continue the loop
				 */
				continue;
			}
			if (mask == 1) {
				/*
				 * Partially allocated, push into this node
				 */
				break;
			}
			if (mask == 2) {
				/*
				 * Unknown state
				 */
				return(bl->bl_free);
			}

			/*
			 * All free, we can return the chunk.
			 */
			*startp += i * radix;
			*countp = radix;
			return(bl->bl_free);
		}

		/*
		 * If we failed to find anything stop here, otherwise push
		 * in.
		 */
		if (i == ALIST_BLOCK_NONE)
			return(bl->bl_free);
		*startp += i * radix;
		scan += 1 + next_skip * i;
		skip = next_skip - 1;
	}

	/*
	 * If we got all the way down to a leaf node locate the last block,
	 * power-of-2 aligned and power-of-2 sized.  Well, the easiest way
	 * to deal with this is to just return 1 block.
	 */
	if (radix == ALIST_BMAP_RADIX) {
		mask = scan->bm_bitmap;
		for (i = ALIST_BMAP_RADIX - 1; i != ALIST_BLOCK_NONE; --i) {
			if ((mask & ((alist_bmap_t)1U << i)))
				break;
		}

		/*
		 * Did not find a free entry.
		 */
		if (i == ALIST_BLOCK_NONE)
			return(bl->bl_free);

		/*
		 * Return one block.
		 */
		*startp += i;
		*countp = 1;
		return(bl->bl_free);
	}
	return(bl->bl_free);
}
#ifdef ALIST_DEBUG

/*
 * alist_print()    - dump radix tree
 */
void
alist_print(alist_t bl)
{
	kprintf("ALIST {\n");
	alst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	kprintf("}\n");
}

#endif
/************************************************************************
 *			ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 */

/*
 * alst_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the ALIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 *
 *	mask bit is 0	allocated, not available
 *	mask bit is 1	free, available for allocation
 */
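/*
 * Worked example of the one-bit binary search below (illustration only):
 * with orig = 0x00010000, the low half (mask 0xFFFF) is empty, so r
 * becomes 16 and orig is shifted right by 16; every later half-window
 * then tests non-zero, so r stays 16 and bit 16 is the bit allocated.
 */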
static alist_blk_t
alst_leaf_alloc(almeta_t *scan, alist_blk_t blk, alist_blk_t start,
		alist_blk_t count)
{
	alist_bmap_t orig = scan->bm_bitmap;

	/*
	 * Allocate only beyond the start point.  Mask to 0 the low bits
	 * below start.  If start == blk no bits get cleared so don't
	 * bother.
	 */
	if (start >= blk + ALIST_BMAP_RADIX)
		return(ALIST_BLOCK_NONE);

	if (start > blk && start < blk + ALIST_BMAP_RADIX)
		orig &= ~(((alist_bmap_t)1U << (start - blk)) - 1);

	start &= ALIST_BMAP_RADIX - 1;

	/*
	 * Optimize bitmap all-allocated case.  Also, count = 1
	 * case assumes at least 1 bit is free in the bitmap, so
	 * we have to take care of this case here.
	 */
	if (orig == 0) {
		if (start <= blk)
			scan->bm_bighint = 0;
		return(ALIST_BLOCK_NONE);
	}

	/*
	 * Optimized code to allocate one bit out of the bitmap
	 */
	if (count == 1) {
		alist_bmap_t mask;
		alist_blk_t j = ALIST_BMAP_RADIX/2;
		alist_blk_t r = 0;

		mask = (alist_bmap_t)-1 >> (ALIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->bm_bitmap &= ~((alist_bmap_t)1U << r);
		return(blk + r);
	}

	/*
	 * Non-optimized code to allocate N bits out of the bitmap.
	 * The more bits, the faster the code runs.  It will run
	 * the slowest allocating 2 bits, but since there aren't any
	 * memory ops in the core loop (or shouldn't be, anyway),
	 * you probably won't notice the difference.
	 *
	 * Similar to the blist case, the alist code also requires
	 * allocations to be power-of-2 sized and aligned to the
	 * size of the allocation, which simplifies the algorithm.
	 */
	{
		alist_blk_t j;
		alist_blk_t n = ALIST_BMAP_RADIX - count;
		alist_bmap_t mask;

		mask = (alist_bmap_t)-1 >> n;

		for (j = 0; j <= n; j += count) {
			if ((orig & mask) == mask) {
				scan->bm_bitmap &= ~mask;
				return(blk + j);
			}
			mask = mask << count;
		}
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint
	 * if we were able to check the entire node.
	 */
	if (start <= blk)
		scan->bm_bighint = count - 1;
	return(ALIST_BLOCK_NONE);
}
/*
 * Attempt to allocate at a meta node.  If we can't, we update
 * bighint and return a failure.  Updating bighint optimizes future
 * calls that hit this node.  We have to check for our collapse cases
 * and we have a few optimizations strewn in as well.
 */
static alist_blk_t
alst_meta_alloc(almeta_t *scan, alist_blk_t blk, alist_blk_t start,
		alist_blk_t count, alist_blk_t radix, alist_blk_t skip)
{
	alist_blk_t i;
	alist_bmap_t mask;
	alist_bmap_t pmask;
	alist_blk_t next_skip = ((u_int)skip / ALIST_META_RADIX);
	alist_blk_t orig_blk;

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->bm_bitmap == 0) {
		scan->bm_bighint = 0;
		return(ALIST_BLOCK_NONE);
	}

	radix /= ALIST_META_RADIX;

	/*
	 * Radix now represents each bitmap entry for this meta node.  If
	 * the number of blocks being allocated can be fully represented,
	 * we allocate directly out of this meta node.
	 *
	 * Meta node bitmaps use 2 bits per block.
	 *
	 *	00	ALL-ALLOCATED
	 *	01	PARTIALLY-FREE/PARTIALLY-ALLOCATED
	 *	10	(RESERVED)
	 *	11	ALL-FREE
	 */
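	/*
	 * Decoding example (illustration only): bm_bitmap == 0xFFFFFFFD
	 * reads, from the low bits up, as child 0 partially free (01) and
	 * children 1-15 all-free (11).
	 */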
	if (count >= radix) {
		alist_blk_t n = count / radix * 2;	/* number of bits */
		alist_blk_t j;

		mask = (alist_bmap_t)-1 >> (ALIST_BMAP_RADIX - n);
		for (j = 0; j < ALIST_META_RADIX; j += n / 2) {
			if ((scan->bm_bitmap & mask) == mask &&
			    blk + j * radix >= start) {
				scan->bm_bitmap &= ~mask;
				return(blk + j * radix);
			}
			mask <<= n;
		}
		if (scan->bm_bighint >= count && start <= blk)
			scan->bm_bighint = count >> 1;
		return(ALIST_BLOCK_NONE);
	}

	/*
	 * If not we have to recurse.
	 */
	mask = 0x00000003;
	pmask = 0x00000001;
	orig_blk = blk;
	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (alist_blk_t)-1) {
			/*
			 * Terminator
			 */
			break;
		}

		/*
		 * If the element is marked completely free (11), initialize
		 * the recursion.
		 */
		if ((scan->bm_bitmap & mask) == mask) {
			scan[i].bm_bitmap = (alist_bmap_t)-1;
			scan[i].bm_bighint = radix;
		}

		if ((scan->bm_bitmap & mask) == 0) {
			/*
			 * Object marked completely allocated, recursion
			 * contains garbage.
			 */
			/* Skip it */
		} else if (blk + radix <= start) {
			/*
			 * Object ends at or before our start point and
			 * cannot satisfy the allocation.
			 */
			/* Skip it */
		} else if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object.  If successful and the
			 * deeper level becomes all allocated, mark our
			 * level as all-allocated.
			 */
			alist_blk_t r;

			if (next_skip == 1) {
				r = alst_leaf_alloc(&scan[i], blk, start,
						    count);
			} else {
				r = alst_meta_alloc(&scan[i], blk, start,
						    count,
						    radix, next_skip - 1);
			}
			if (r != ALIST_BLOCK_NONE) {
				if (scan[i].bm_bitmap == 0) {
					scan->bm_bitmap &= ~mask;
				} else {
					scan->bm_bitmap &= ~mask;
					scan->bm_bitmap |= pmask;
				}
				return(r);
			}
		}
		blk += radix;
		mask <<= 2;
		pmask <<= 2;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint
	 * if we were able to check the entire node.
	 */
	if (scan->bm_bighint >= count && start <= orig_blk)
		scan->bm_bighint = count >> 1;
	return(ALIST_BLOCK_NONE);
}
/*
 * Free allocated block from leaf bitmap
 */
static void
alst_leaf_free(almeta_t *scan, alist_blk_t blk, alist_blk_t count)
{
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *	    \_________/\__/
	 *		v	n
	 */
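	/*
	 * Mask arithmetic example (illustration only): blk offset n = 4
	 * with count = 3 gives mask = 0xFFFFFFF0 & 0x0000007F = 0x00000070,
	 * i.e. bits 4-6 set.
	 */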
	alist_blk_t n = blk & (ALIST_BMAP_RADIX - 1);
	alist_bmap_t mask;

	mask = ((alist_bmap_t)-1 << n) &
	       ((alist_bmap_t)-1 >> (ALIST_BMAP_RADIX - count - n));

	if (scan->bm_bitmap & mask)
		panic("%s: freeing free block", __func__);
	scan->bm_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = ALIST_BMAP_RADIX;
}
/*
 * Free allocated blocks from radix tree meta info.  This support routine
 * frees a range of blocks from the bitmap.  The range must be entirely
 * enclosed by this radix node.  If a meta node, we break the range down
 * recursively to free blocks in subnodes (which means that this code
 * can free an arbitrary range whereas the allocation code cannot allocate
 * an arbitrary range).
 */
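/*
 * Decomposition example (illustration only): freeing 100 blocks starting
 * at block 40 in a meta node whose children each cover radix = 32 blocks
 * splits into per-child frees of 24, 32, 32 and 12 blocks, clamping each
 * pass with v = min(blk + radix - freeBlk, count).
 */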
static void
alst_meta_free(almeta_t *scan, alist_blk_t freeBlk, alist_blk_t count,
	       alist_blk_t radix, alist_blk_t skip, alist_blk_t blk)
{
	alist_blk_t next_skip = ((u_int)skip / ALIST_META_RADIX);
	alist_bmap_t mask;
	alist_bmap_t pmask;
	alist_blk_t i;

	/*
	 * Break the free down into its components.  Because it is so easy
	 * to implement, frees are not limited to power-of-2 sizes.
	 *
	 * Each block in a meta-node bitmap takes two bits.
	 */
	radix /= ALIST_META_RADIX;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	mask = 0x00000003 << (i * 2);
	pmask = 0x00000001 << (i * 2);

	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		alist_blk_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (alist_blk_t)-1)
			panic("%s: freeing unexpected range", __func__);

		/*
		 * WARNING on bighint updates.  When we free an element in
		 * a chunk, if the chunk becomes wholly free it is possible
		 * that the whole node is now free, so bighint must be set
		 * to cover the whole node.  Otherwise address-specific
		 * allocations may fail.
		 *
		 * We don't bother figuring out how much of the node is
		 * actually free in this case.
		 */
		if (freeBlk == blk && count >= radix) {
			/*
			 * The area being freed covers the entire block,
			 * assert that we are marked all-allocated and
			 * then mark it all-free.
			 */
			KKASSERT((scan->bm_bitmap & mask) == 0);
			scan->bm_bitmap |= mask;
			scan->bm_bighint = radix * ALIST_META_RADIX;
		} else {
			/*
			 * If we were previously marked all-allocated, fix-up
			 * the next layer so we can recurse down into it.
			 */
			if ((scan->bm_bitmap & mask) == 0) {
				scan[i].bm_bitmap = (alist_bmap_t)0;
				scan[i].bm_bighint = 0;
			}

			/*
			 * Recursion case, then either mark all-free or
			 * partially free.
			 */
			if (next_skip == 1) {
				alst_leaf_free(&scan[i], freeBlk, v);
			} else {
				alst_meta_free(&scan[i], freeBlk, v,
					       radix, next_skip - 1, blk);
			}
			if (scan[i].bm_bitmap == (alist_bmap_t)-1) {
				scan->bm_bitmap |= mask;
				scan->bm_bighint = radix * ALIST_META_RADIX;
			} else {
				scan->bm_bitmap &= ~mask;
				scan->bm_bitmap |= pmask;
				if (scan->bm_bighint < scan[i].bm_bighint)
					scan->bm_bighint = scan[i].bm_bighint;
			}
		}
		mask <<= 2;
		pmask <<= 2;
		count -= v;
		freeBlk += v;
		blk += radix;
		i += next_skip;
	}
}
/*
 * Initialize our meta structures and bitmaps and calculate the exact
 * amount of space required to manage 'count' blocks - this space may
 * be considerably less than the calculated radix due to the large
 * RADIX values we use.
 */
static alist_blk_t
alst_radix_init(almeta_t *scan, alist_blk_t blk, alist_blk_t radix,
		alist_blk_t skip, alist_blk_t count)
{
	alist_blk_t i;
	alist_blk_t next_skip;
	alist_bmap_t mask;
	alist_bmap_t pmask;
	alist_blk_t memindex;

	/*
	 * Leaf node
	 */
	if (radix == ALIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->bm_bitmap = 0;
		}
		return(0);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */
	if (scan) {
		scan->bm_bighint = 0;
		scan->bm_bitmap = 0;
	}
	memindex = 0;

	radix /= ALIST_META_RADIX;
	next_skip = skip / ALIST_META_RADIX;
	mask = 0x00000003;
	pmask = 0x00000001;

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= blk + radix) {
			/*
			 * Allocate the entire object
			 */
			memindex += alst_radix_init(((scan) ? &scan[i] : NULL),
						    blk, radix,
						    next_skip - 1, count);
			/* already marked as wholly allocated */
		} else if (count > blk) {
			/*
			 * Allocate a partial object; it is really
			 * all-allocated, we just aren't allowed to
			 * free the whole thing.
			 */
			memindex += alst_radix_init(((scan) ? &scan[i] : NULL),
						    blk, radix,
						    next_skip - 1, count);
			/* already marked as wholly allocated */
		} else {
			/*
			 * Add terminator but continue the loop.  Populate
			 * all terminators.
			 */
			if (scan) {
				scan[i].bm_bighint = (alist_blk_t)-1;
				scan[i].bm_bitmap = 0;
			}
			/* already marked as wholly allocated */
		}
		mask <<= 2;
		pmask <<= 2;
		blk += radix;
	}
	memindex += ALIST_META_RADIX;
	return(memindex);
}
#ifdef ALIST_DEBUG

static void
alst_radix_print(almeta_t *scan, alist_blk_t blk, alist_blk_t radix,
		 alist_blk_t skip, int tab)
{
	alist_blk_t i;
	alist_blk_t next_skip;
	alist_bmap_t mask;

	if (radix == ALIST_BMAP_RADIX) {
		kprintf(
			"%*.*s(%04x,%d): bitmap %08x big=%d\n",
			tab, tab, "",
			blk, radix,
			scan->bm_bitmap,
			scan->bm_bighint
		);
		return;
	}

	if (scan->bm_bitmap == 0) {
		kprintf(
			"%*.*s(%04x,%d) ALL ALLOCATED\n",
			tab, tab, "",
			blk,
			radix
		);
		return;
	}
	if (scan->bm_bitmap == (alist_bmap_t)-1) {
		kprintf(
			"%*.*s(%04x,%d) ALL FREE\n",
			tab, tab, "",
			blk,
			radix
		);
		return;
	}

	kprintf(
		"%*.*s(%04x,%d): subtree (%d) bitmap=%08x big=%d {\n",
		tab, tab, "",
		blk, radix,
		radix,
		scan->bm_bitmap,
		scan->bm_bighint
	);

	radix /= ALIST_META_RADIX;
	next_skip = skip / ALIST_META_RADIX;
	tab += 4;
	mask = 0x00000003;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (alist_blk_t)-1) {
			kprintf(
				"%*.*s(%04x,%d): Terminator\n",
				tab, tab, "",
				blk, radix
			);
			break;
		}
		if ((scan->bm_bitmap & mask) == mask) {
			kprintf(
				"%*.*s(%04x,%d): ALL FREE\n",
				tab, tab, "",
				blk, radix
			);
		} else if ((scan->bm_bitmap & mask) == 0) {
			kprintf(
				"%*.*s(%04x,%d): ALL ALLOCATED\n",
				tab, tab, "",
				blk, radix
			);
		} else {
			alst_radix_print(
				&scan[i],
				blk,
				radix,
				next_skip - 1,
				tab
			);
		}
		blk += radix;
		mask <<= 2;
	}
	tab -= 4;

	kprintf("%*.*s}\n", tab, tab, "");
}

#endif
#ifdef ALIST_DEBUG

int
main(int ac, char **av)
{
	alist_blk_t size = 1024;
	alist_blk_t da = 0;
	alist_blk_t count = 0;
	alist_t bl;
	int i;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = alist_create(size, NULL);
	alist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		alist_blk_t bfree;

		kprintf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'p':
			alist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
				da = alist_alloc(bl, da, count);
				kprintf("    R=%04x\n", da);
			} else if (sscanf(buf + 1, "%d", &count) == 1) {
				da = alist_alloc(bl, 0, count);
				kprintf("    R=%04x\n", da);
			} else if (count) {
				alist_blk_t blk;

				kprintf("alloc 0x%04x/%d\n", da, count);
				blk = alist_alloc(bl, da, count);
				kprintf("    R=%04x\n", blk);
			} else {
				kprintf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
				alist_free(bl, da, count);
			} else if (count) {
				kprintf("free 0x%04x/%d\n", da, count);
				alist_free(bl, da, count);
			} else {
				kprintf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts("p          -print\n"
			     "a %d       -allocate\n"
			     "f %x %d    -free\n"
			     "h/?        -help");
			break;
		case 'i':
			bfree = alist_free_info(bl, &da, &count);
			kprintf("info: %d free trailing: 0x%04x/%d\n",
				bfree, da, count);
			break;
		default:
			kprintf("?\n");
			break;
		}
	}
	return(0);
}
void
panic(const char *ctl, ...)
{
	__va_list va;

	__va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	__va_end(va);
	exit(1);
}

#endif