hammer2 - Implement meta-data statistics rollup
sys/vfs/hammer2/hammer2_freemap.c
/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>

#include "hammer2.h"

#define FREEMAP_DEBUG   0

struct hammer2_fiterate {
        hammer2_off_t   bpref;
        hammer2_off_t   bnext;
        int             loops;
};

typedef struct hammer2_fiterate hammer2_fiterate_t;

static int hammer2_freemap_try_alloc(hammer2_trans_t *trans,
                        hammer2_chain_t **parentp, hammer2_blockref_t *bref,
                        int radix, hammer2_fiterate_t *iter);
static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
                        hammer2_key_t key, hammer2_chain_t *chain);
static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
                        hammer2_bmap_data_t *bmap, uint16_t class,
                        int n, int radix, hammer2_key_t *basep);
static int hammer2_freemap_iterate(hammer2_trans_t *trans,
                        hammer2_chain_t **parentp, hammer2_chain_t **chainp,
                        hammer2_fiterate_t *iter);
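
/*
 * Currently an identity mapping: the freemap radix equals the allocation
 * radix (kept as a separate helper, presumably so the mapping can diverge
 * later).
 */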
static __inline
int
hammer2_freemapradix(int radix)
{
        return(radix);
}

/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 2GB zone.
 *
 * Rotate between four possibilities.  Theoretically this means we have three
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount-time.
 */
#define H2FMBASE(key, radix)    ((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
#define H2FMSHIFT(radix)        ((hammer2_off_t)1 << (radix))
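/*
 * H2FMBASE() masks a key down to the base of the (1 << radix) zone that
 * contains it; H2FMSHIFT() is the byte size of a zone at that radix.
 */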

static
int
hammer2_freemap_reserve(hammer2_trans_t *trans, hammer2_chain_t *chain,
                        int radix)
{
        hammer2_blockref_t *bref = &chain->bref;
        hammer2_off_t off;
        int index;
        size_t bytes;

        /*
         * Physical allocation size -> radix.  Typically either 256 for
         * a level 0 freemap leaf or 65536 for a level N freemap node.
         *
         * NOTE: A 256 byte bitmap represents 256 x 8 x 1024 = 2MB of storage.
         *       Do not use hammer2_allocsize() here as it has a min cap.
         */
        bytes = 1 << radix;

        /*
         * Calculate block selection index 0..7 of current block.
         */
        if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
                index = 0;
        } else {
                off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
                      (((hammer2_off_t)1 << HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
                off = off / HAMMER2_PBUFSIZE;
                KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
                         off < HAMMER2_ZONE_FREEMAP_END);
                index = (int)(off - HAMMER2_ZONE_FREEMAP_00) / 4;
                KKASSERT(index >= 0 && index < HAMMER2_ZONE_FREEMAP_COPIES);
        }

        /*
         * Calculate new index (our 'allocation').
         */
        index = (index + 1) % HAMMER2_ZONE_FREEMAP_COPIES;

        /*
         * Calculate the block offset of the reserved block.  This will
         * point into the 4MB reserved area at the base of the appropriate
         * 2GB zone, once added to the FREEMAP_x selection above.
         */
        switch(bref->keybits) {
        /* case HAMMER2_FREEMAP_LEVEL5_RADIX: not applicable */
        case HAMMER2_FREEMAP_LEVEL4_RADIX:      /* 2EB */
                KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
                KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
                off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
                      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
                       HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
                break;
        case HAMMER2_FREEMAP_LEVEL3_RADIX:      /* 2PB */
                KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
                KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
                off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
                      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
                       HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
                break;
        case HAMMER2_FREEMAP_LEVEL2_RADIX:      /* 2TB */
                KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
                KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
                off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
                      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
                       HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
                break;
        case HAMMER2_FREEMAP_LEVEL1_RADIX:      /* 2GB */
                KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
                KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
                off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
                      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
                       HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
                break;
        default:
                panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
                /* NOT REACHED */
                off = (hammer2_off_t)-1;
                break;
        }
        bref->data_off = off | radix;
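        /*
         * Note that the low bits of data_off carry the size radix; the
         * media offset proper is data_off & ~HAMMER2_OFF_MASK_RADIX.
         */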
#if FREEMAP_DEBUG
        kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
                bref->type, bref->key, bref->keybits, bref->data_off);
#endif
        return (0);
}

/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.  Create missing
 * freemap infrastructure on-the-fly as needed (including marking initial
 * allocations using the iterator as allocated, instantiating new 2GB zones,
 * and dealing with the end-of-media edge case).
 *
 * ip and bpref are only used as a heuristic to determine locality of
 * reference.  bref->key may also be used heuristically.
 */
int
hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
                      size_t bytes)
{
        hammer2_mount_t *hmp = chain->hmp;
        hammer2_blockref_t *bref = &chain->bref;
        hammer2_chain_t *parent;
        int radix;
        int error;
        unsigned int hindex;
        hammer2_fiterate_t iter;

        /*
         * Validate the allocation size.  It must be a power of 2.
         *
         * For now require that the caller be aware of the minimum
         * allocation (1K).
         */
        radix = hammer2_getradix(bytes);
        KKASSERT((size_t)1 << radix == bytes);

        if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
            bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
                /*
                 * Freemap blocks themselves are assigned from the reserve
                 * area, not allocated from the freemap.
                 */
                error = hammer2_freemap_reserve(trans, chain, radix);
                return error;
        }

        KKASSERT(bytes >= HAMMER2_ALLOC_MIN && bytes <= HAMMER2_ALLOC_MAX);

        if (trans->flags & (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_PREFLUSH))
                ++trans->sync_xid;

        /*
         * Calculate the starting point for our allocation search.
         *
         * Each freemap leaf is dedicated to a specific freemap_radix.
         * The freemap_radix can be more fine-grained than the device buffer
         * radix which results in inodes being grouped together in their
         * own segment, terminal-data (16K or less) and initial indirect
         * block being grouped together, and then full-indirect and full-data
         * blocks (64K) being grouped together.
         *
         * The single most important aspect of this is the inode grouping
         * because that is what allows 'find' and 'ls' and other filesystem
         * topology operations to run fast.
         */
#if 0
        if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
                bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
        else if (trans->tmp_bpref)
                bpref = trans->tmp_bpref;
        else if (trans->tmp_ip)
                bpref = trans->tmp_ip->chain->bref.data_off;
        else
#endif
        /*
         * Heuristic tracking index.  We would like one for each distinct
         * bref type if possible.  heur_freemap[] has room for two classes
         * for each type.  At a minimum we have to break-up our heuristic
         * by device block sizes.
         */
        hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
        KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
        hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
        hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
        KKASSERT(hindex < HAMMER2_FREEMAP_HEUR);

        iter.bpref = hmp->heur_freemap[hindex];

        /*
         * Make sure bpref is in-bounds.  It's ok if bpref covers a zone's
         * reserved area, the try code will iterate past it.
         */
        if (iter.bpref > hmp->voldata.volu_size)
                iter.bpref = hmp->voldata.volu_size - 1;

        /*
         * Iterate the freemap looking for free space before and after.
         */
        parent = &hmp->fchain;
        hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
        error = EAGAIN;
        iter.bnext = iter.bpref;
        iter.loops = 0;

        while (error == EAGAIN) {
                error = hammer2_freemap_try_alloc(trans, &parent, bref,
                                                  radix, &iter);
        }
        hmp->heur_freemap[hindex] = iter.bnext;
        hammer2_chain_unlock(parent);

        if (trans->flags & (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_PREFLUSH))
                --trans->sync_xid;

        return (error);
}

static int
hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp,
                          hammer2_blockref_t *bref, int radix,
                          hammer2_fiterate_t *iter)
{
        hammer2_mount_t *hmp = (*parentp)->hmp;
        hammer2_off_t l0size;
        hammer2_off_t l1size;
        hammer2_off_t l1mask;
        hammer2_key_t key_dummy;
        hammer2_chain_t *chain;
        hammer2_off_t key;
        size_t bytes;
        uint16_t class;
        int error = 0;
        int cache_index = -1;
        int ddflag;

        /*
         * Calculate the number of bytes being allocated, the number
         * of contiguous bits of bitmap being allocated, and the bitmap
         * mask.
         *
         * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
         *          mask calculation.
         */
        bytes = (size_t)1 << radix;
        class = (bref->type << 8) | hammer2_devblkradix(radix);
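
        /*
         * The 'class' computed above pairs the blockref type (high byte)
         * with the device block radix (low byte).  A 2MB bmap segment only
         * accepts allocations of a single class (or is unclassified), which
         * keeps inodes, small data and large data from being interleaved.
         */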

        /*
         * Lookup the level1 freemap chain, creating and initializing one
         * if necessary.  Intermediate levels will be created automatically
         * when necessary by hammer2_chain_create().
         */
        key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
        l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
        l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
        l1mask = l1size - 1;

        chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
                                     &cache_index,
                                     HAMMER2_LOOKUP_ALWAYS |
                                     HAMMER2_LOOKUP_MATCHIND, &ddflag);

        if (chain == NULL) {
                /*
                 * Create the missing leaf, be sure to initialize
                 * the auxiliary freemap tracking information in
                 * the bref.check.freemap structure.
                 */
#if 0
                kprintf("freemap create L1 @ %016jx bpref %016jx\n",
                        key, iter->bpref);
#endif
                error = hammer2_chain_create(trans, parentp, &chain, hmp->spmp,
                                     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
                                     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
                                     HAMMER2_FREEMAP_LEVELN_PSIZE,
                                     0);
                KKASSERT(error == 0);
                if (error == 0) {
                        hammer2_chain_modify(trans, chain, 0);
                        bzero(&chain->data->bmdata[0],
                              HAMMER2_FREEMAP_LEVELN_PSIZE);
                        chain->bref.check.freemap.bigmask = (uint32_t)-1;
                        chain->bref.check.freemap.avail = l1size;
                        /* bref.methods should already be inherited */

                        hammer2_freemap_init(trans, hmp, key, chain);
                }
        } else if ((chain->bref.check.freemap.bigmask & (1 << radix)) == 0) {
                /*
                 * Already flagged as not having enough space
                 */
                error = ENOSPC;
        } else {
                /*
                 * Modify existing chain to setup for adjustment.
                 */
                hammer2_chain_modify(trans, chain, 0);
        }

        /*
         * Scan 2MB entries.
         */
        if (error == 0) {
                hammer2_bmap_data_t *bmap;
                hammer2_key_t base_key;
                int count;
                int start;
                int n;

                KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
                start = (int)((iter->bnext - key) >>
                              HAMMER2_FREEMAP_LEVEL0_RADIX);
                KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
                hammer2_chain_modify(trans, chain, 0);

                error = ENOSPC;
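                /*
                 * Scan outward from the starting 2MB segment, alternately
                 * trying the segment above (start + count) and the segment
                 * below (start - count), until a segment with space for
                 * this class is found or the leaf is exhausted.
                 */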
                for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
                        if (start + count >= HAMMER2_FREEMAP_COUNT &&
                            start - count < 0) {
                                break;
                        }
                        n = start + count;
                        bmap = &chain->data->bmdata[n];
                        if (n < HAMMER2_FREEMAP_COUNT && bmap->avail &&
                            (bmap->class == 0 || bmap->class == class)) {
                                base_key = key + n * l0size;
                                error = hammer2_bmap_alloc(trans, hmp, bmap,
                                                           class, n, radix,
                                                           &base_key);
                                if (error != ENOSPC) {
                                        key = base_key;
                                        break;
                                }
                        }
                        n = start - count;
                        bmap = &chain->data->bmdata[n];
                        if (n >= 0 && bmap->avail &&
                            (bmap->class == 0 || bmap->class == class)) {
                                base_key = key + n * l0size;
                                error = hammer2_bmap_alloc(trans, hmp, bmap,
                                                           class, n, radix,
                                                           &base_key);
                                if (error != ENOSPC) {
                                        key = base_key;
                                        break;
                                }
                        }
                }
                if (error == ENOSPC)
                        chain->bref.check.freemap.bigmask &= ~(1 << radix);
                /* XXX also scan down from original count */
        }

        if (error == 0) {
                /*
                 * Assert validity.  Must be beyond the static allocator used
                 * by newfs_hammer2 (and thus also beyond the aux area),
                 * not go past the volume size, and must not be in the
                 * reserved segment area for a zone.
                 */
                KKASSERT(key >= hmp->voldata.allocator_beg &&
                         key + bytes <= hmp->voldata.volu_size);
                KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
                bref->data_off = key | radix;
#if 0
                kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
                        chain,
                        bref->key, bref->data_off, chain->bref.data_off);
#endif
        } else if (error == ENOSPC) {
                /*
                 * Return EAGAIN with next iteration in iter->bnext, or
                 * return ENOSPC if the allocation map has been exhausted.
                 */
                error = hammer2_freemap_iterate(trans, parentp, &chain, iter);
        }

        /*
         * Cleanup
         */
        if (chain)
                hammer2_chain_unlock(chain);
        return (error);
}

/*
 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
 *
 * If the linear iterator is mid-block we use it directly (the bitmap should
 * already be marked allocated), otherwise we search for a block in the bitmap
 * that fits the allocation request.
 *
 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
 * to fully allocated and adjusts the linear allocator to allow the
 * remaining space to be allocated.
 */
static
int
hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
                   hammer2_bmap_data_t *bmap,
                   uint16_t class, int n, int radix, hammer2_key_t *basep)
{
        hammer2_io_t *dio;
        size_t size;
        size_t bsize;
        int bmradix;
        uint32_t bmmask;
        int offset;
        int error;
        int i;
        int j;

        /*
         * Take into account 2-bits per block when calculating bmradix.
         */
        size = (size_t)1 << radix;

        if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
                bmradix = 2;
                bsize = HAMMER2_FREEMAP_BLOCK_SIZE;
                /* (16K) 2 bits per allocation block */
        } else {
                bmradix = 2 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
                bsize = size;
                /* (32K-256K) 4, 8, 16, 32 bits per allocation block */
        }

        /*
         * Use the linear iterator to pack small allocations, otherwise
         * fall-back to finding a free 16KB chunk.  The linear iterator
         * is only valid when *NOT* on a freemap chunking boundary (16KB).
         * If it is the bitmap must be scanned.  It can become invalid
         * once we pack to the boundary.  We adjust it after a bitmap
         * allocation only for sub-16KB allocations (so the perfectly good
         * previous value can still be used for fragments when 16KB+
         * allocations are made).
         *
         * Beware of hardware artifacts when bmradix == 32 (intermediate
         * result can wind up being '1' instead of '0' if hardware masks
         * bit-count & 31).
         *
         * NOTE: j needs to be even in the j= calculation.  As an artifact
         *       of the /2 division, our bitmask has to clear bit 0.
         *
         * NOTE: TODO this can leave little unallocatable fragments lying
         *       around.
         */
        if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
            HAMMER2_FREEMAP_BLOCK_SIZE &&
            (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
            bmap->linear < HAMMER2_SEGSIZE) {
                KKASSERT(bmap->linear >= 0 &&
                         bmap->linear + size <= HAMMER2_SEGSIZE &&
                         (bmap->linear & (HAMMER2_ALLOC_MIN - 1)) == 0);
                offset = bmap->linear;
                i = offset / (HAMMER2_SEGSIZE / 8);
                j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
                bmmask = (bmradix == 32) ?
                         0xFFFFFFFFU : (1 << bmradix) - 1;
                bmmask <<= j;
                bmap->linear = offset + size;
        } else {
                for (i = 0; i < 8; ++i) {
                        bmmask = (bmradix == 32) ?
                                 0xFFFFFFFFU : (1 << bmradix) - 1;
                        for (j = 0; j < 32; j += bmradix) {
                                if ((bmap->bitmap[i] & bmmask) == 0)
                                        goto success;
                                bmmask <<= bmradix;
                        }
                }
                /*fragments might remain*/
                /*KKASSERT(bmap->avail == 0);*/
                return (ENOSPC);
success:
                offset = i * (HAMMER2_SEGSIZE / 8) +
                         (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
                if (size & HAMMER2_FREEMAP_BLOCK_MASK)
                        bmap->linear = offset + size;
        }

        KKASSERT(i >= 0 && i < 8);      /* 8 x 16 -> 128 x 16K -> 2MB */

        /*
         * Optimize the buffer cache to avoid unnecessary read-before-write
         * operations.
         *
         * The device block size could be larger than the allocation size
         * so the actual bitmap test is somewhat more involved.  We have
         * to use a compatible buffer size for this operation.
         */
        if ((bmap->bitmap[i] & bmmask) == 0 &&
            hammer2_devblksize(size) != size) {
                size_t psize = hammer2_devblksize(size);
                hammer2_off_t pmask = (hammer2_off_t)psize - 1;
                int pbmradix = 2 << (hammer2_devblkradix(radix) -
                                     HAMMER2_FREEMAP_BLOCK_RADIX);
                uint32_t pbmmask;
                int pradix = hammer2_getradix(psize);

                pbmmask = (pbmradix == 32) ? 0xFFFFFFFFU : (1 << pbmradix) - 1;
                while ((pbmmask & bmmask) == 0)
                        pbmmask <<= pbmradix;

#if 0
                kprintf("%016jx mask %08x %08x %08x (%zd/%zd)\n",
                        *basep + offset, bmap->bitmap[i],
                        pbmmask, bmmask, size, psize);
#endif

                if ((bmap->bitmap[i] & pbmmask) == 0) {
                        error = hammer2_io_newq(hmp,
                                                (*basep + (offset & ~pmask)) |
                                                 pradix,
                                                psize, &dio);
                        hammer2_io_bqrelse(&dio);
                }
        }

#if 0
        /*
         * When initializing a new inode segment also attempt to initialize
         * an adjacent segment.  Be careful not to index beyond the array
         * bounds.
         *
         * We do this to try to localize inode accesses to improve
         * directory scan rates.  XXX doesn't improve scan rates.
         */
        if (size == HAMMER2_INODE_BYTES) {
                if (n & 1) {
                        if (bmap[-1].radix == 0 && bmap[-1].avail)
                                bmap[-1].radix = radix;
                } else {
                        if (bmap[1].radix == 0 && bmap[1].avail)
                                bmap[1].radix = radix;
                }
        }
#endif

        /*
         * Adjust the linear iterator, set the radix if necessary (might as
         * well just set it unconditionally), adjust *basep to return the
         * allocated data offset.
         */
        bmap->bitmap[i] |= bmmask;
        bmap->class = class;
        bmap->avail -= size;
        *basep += offset;

        hammer2_voldata_lock(hmp);
        hammer2_voldata_modify(hmp);
        hmp->voldata.allocator_free -= size;    /* XXX */
        hammer2_voldata_unlock(hmp);

        return(0);
}

static
void
hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
                     hammer2_key_t key, hammer2_chain_t *chain)
{
        hammer2_off_t l1size;
        hammer2_off_t lokey;
        hammer2_off_t hikey;
        hammer2_bmap_data_t *bmap;
        int count;

        l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);

        /*
         * Calculate the portion of the 2GB map that should be initialized
         * as free.  Portions below or after will be initialized as allocated.
         * SEGMASK-align the areas so we don't have to worry about sub-scans
         * or endianness when using memset.
         *
         * (1) Ensure that all statically allocated space from newfs_hammer2
         *     is marked allocated.
         *
         * (2) Ensure that the reserved area is marked allocated (typically
         *     the first 4MB of the 2GB area being represented).
         *
         * (3) Ensure that any trailing space at the end-of-volume is marked
         *     allocated.
         *
         * WARNING! It is possible for lokey to be larger than hikey if the
         *          entire 2GB segment is within the static allocation.
         */
        lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
                ~HAMMER2_SEGMASK64;

        if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
                    HAMMER2_ZONE_SEG64) {
                lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
                        HAMMER2_ZONE_SEG64;
        }

        hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
        if (hikey > hmp->voldata.volu_size) {
                hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;
        }

        chain->bref.check.freemap.avail =
                H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
        bmap = &chain->data->bmdata[0];

        for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
                if (key < lokey || key >= hikey) {
                        memset(bmap->bitmap, -1,
                               sizeof(bmap->bitmap));
                        bmap->avail = 0;
                        bmap->linear = HAMMER2_SEGSIZE;
                        chain->bref.check.freemap.avail -=
                                H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
                } else {
                        bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
                }
                key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
                ++bmap;
        }
}

/*
 * The current Level 1 freemap has been exhausted, iterate to the next
 * one, return ENOSPC if no freemaps remain.
 *
 * XXX this should rotate back to the beginning to handle freed-up space
 * XXX or use intermediate entries to locate free space. TODO
 */
static int
hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
                        hammer2_chain_t **chainp, hammer2_fiterate_t *iter)
{
        hammer2_mount_t *hmp = (*parentp)->hmp;

        iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
        iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
        if (iter->bnext >= hmp->voldata.volu_size) {
                iter->bnext = 0;
                if (++iter->loops == 2)
                        return (ENOSPC);
        }
        return(EAGAIN);
}

/*
 * Adjust the bit-pattern for data in the freemap bitmap according to
 * (how).  This code is called from on-mount recovery to fixup (mark
 * as allocated) blocks whose freemap updates might not have been committed
 * in the last crash and is used by the bulk freemap scan to stage frees.
 *
 * XXX currently disabled when how == 0 (the normal real-time case).  At
 * the moment we depend on the bulk freescan to actually free blocks.  It
 * will still call this routine with a non-zero how to stage possible frees
 * and to do the actual free.
 */
void
hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp,
                       hammer2_blockref_t *bref, int how)
{
        hammer2_off_t data_off = bref->data_off;
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;
        hammer2_bmap_data_t *bmap;
        hammer2_key_t key;
        hammer2_key_t key_dummy;
        hammer2_off_t l0size;
        hammer2_off_t l1size;
        hammer2_off_t l1mask;
        uint32_t *bitmap;
        const uint32_t bmmask00 = 0;
        uint32_t bmmask01;
        uint32_t bmmask10;
        uint32_t bmmask11;
        size_t bytes;
        uint16_t class;
        int radix;
        int start;
        int count;
        int modified = 0;
        int cache_index = -1;
        int error;
        int ddflag;

        radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
        data_off &= ~HAMMER2_OFF_MASK_RADIX;
        KKASSERT(radix <= HAMMER2_RADIX_MAX);

        bytes = (size_t)1 << radix;
        class = (bref->type << 8) | hammer2_devblkradix(radix);

        /*
         * We can't adjust the freemap for data allocations made by
         * newfs_hammer2.
         */
        if (data_off < hmp->voldata.allocator_beg)
                return;

        KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);

        /*
         * Lookup the level1 freemap chain.  The chain must exist.
         */
        key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
        l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
        l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
        l1mask = l1size - 1;

        parent = &hmp->fchain;
        hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);

        chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
                                     &cache_index,
                                     HAMMER2_LOOKUP_ALWAYS |
                                     HAMMER2_LOOKUP_MATCHIND, &ddflag);

        /*
         * Stop early if we are trying to free something but no leaf exists.
         */
        if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
                kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
                        (intmax_t)bref->data_off);
                goto done;
        }

        /*
         * Create any missing leaf(s) if we are doing a recovery (marking
         * the block(s) as being allocated instead of being freed).  Be sure
         * to initialize the auxiliary freemap tracking info in the
         * bref.check.freemap structure.
         */
        if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
                error = hammer2_chain_create(trans, &parent, &chain, hmp->spmp,
                                     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
                                     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
                                     HAMMER2_FREEMAP_LEVELN_PSIZE,
                                     0);

                if (hammer2_debug & 0x0040) {
                        kprintf("fixup create chain %p %016jx:%d\n",
                                chain, chain->bref.key, chain->bref.keybits);
                }

                if (error == 0) {
                        hammer2_chain_modify(trans, chain, 0);
                        bzero(&chain->data->bmdata[0],
                              HAMMER2_FREEMAP_LEVELN_PSIZE);
                        chain->bref.check.freemap.bigmask = (uint32_t)-1;
                        chain->bref.check.freemap.avail = l1size;
                        /* bref.methods should already be inherited */

                        hammer2_freemap_init(trans, hmp, key, chain);
                }
                /* XXX handle error */
        }

#if FREEMAP_DEBUG
        kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n",
                chain->bref.type, chain->bref.key,
                chain->bref.keybits, chain->bref.data_off);
#endif

        /*
         * Calculate the bitmask (runs in 2-bit pairs).
         */
        start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
        bmmask01 = 1 << start;
        bmmask10 = 2 << start;
        bmmask11 = 3 << start;
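
        /*
         * Each 16KB freemap block is tracked by a 2-bit pair:
         *
         *      00      free
         *      01      unused (currently an illegal state)
         *      10      possibly free (staged by DOMAYFREE for the bulk scan)
         *      11      allocated
         *
         * bmmask01/10/11 select those values for the pair being adjusted.
         */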

        /*
         * Fixup the bitmap.  Partial blocks cannot be fully freed unless
         * a bulk scan is able to roll them up.
         */
        if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
                count = 1;
                if (how == HAMMER2_FREEMAP_DOREALFREE)
                        how = HAMMER2_FREEMAP_DOMAYFREE;
        } else {
                count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
        }

        /*
         * [re]load the bmap and bitmap pointers.  Each bmap entry covers
         * a 2MB swath.  The bmap itself (LEVEL1) covers 2GB.
         *
         * Be sure to reset the linear iterator to ensure that the adjustment
         * is not ignored.
         */
again:
        bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
                                    (HAMMER2_FREEMAP_COUNT - 1)];
        bitmap = &bmap->bitmap[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];
        bmap->linear = 0;

        while (count) {
                KKASSERT(bmmask11);
                if (how == HAMMER2_FREEMAP_DORECOVER) {
                        /*
                         * Recovery request, mark as allocated.
                         */
                        if ((*bitmap & bmmask11) != bmmask11) {
                                if (modified == 0) {
                                        hammer2_chain_modify(trans, chain, 0);
                                        modified = 1;
                                        goto again;
                                }
                                if ((*bitmap & bmmask11) == bmmask00)
                                        bmap->avail -= 1 << radix;
                                if (bmap->class == 0)
                                        bmap->class = class;
                                *bitmap |= bmmask11;
                                if (hammer2_debug & 0x0040) {
                                        kprintf("hammer2_freemap_recover: "
                                                "fixup type=%02x "
                                                "block=%016jx/%zd\n",
                                                bref->type, data_off, bytes);
                                }
                        } else {
                                if (hammer2_debug & 0x0040) {
                                        kprintf("hammer2_freemap_recover: good "
                                                "type=%02x block=%016jx/%zd\n",
                                                bref->type, data_off, bytes);
                                }
                        }
                } else if ((*bitmap & bmmask11) == bmmask11) {
                        /*
                         * Mayfree/Realfree request and bitmap is currently
                         * marked as being fully allocated.
                         */
                        if (!modified) {
                                hammer2_chain_modify(trans, chain, 0);
                                modified = 1;
                                goto again;
                        }
                        if (how == HAMMER2_FREEMAP_DOREALFREE)
                                *bitmap &= ~bmmask11;
                        else
                                *bitmap = (*bitmap & ~bmmask11) | bmmask10;
                } else if ((*bitmap & bmmask11) == bmmask10) {
                        /*
                         * Mayfree/Realfree request and bitmap is currently
                         * marked as being possibly freeable.
                         */
                        if (how == HAMMER2_FREEMAP_DOREALFREE) {
                                if (!modified) {
                                        hammer2_chain_modify(trans, chain, 0);
                                        modified = 1;
                                        goto again;
                                }
                                *bitmap &= ~bmmask11;
                        }
                } else {
                        /*
                         * 01 - Not implemented, currently illegal state
                         * 00 - Not allocated at all, illegal free.
                         */
                        panic("hammer2_freemap_adjust: "
                              "Illegal state %08x(%08x)",
                              *bitmap, *bitmap & bmmask11);
                }
                --count;
                bmmask01 <<= 2;
                bmmask10 <<= 2;
                bmmask11 <<= 2;
        }

        if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
                bmap->avail += 1 << radix;
                KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
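                /*
                 * If the entire 2MB segment is now completely free, clear
                 * its class so the segment can be reused by allocations of
                 * any class.
                 */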
                if (bmap->avail == HAMMER2_SEGSIZE &&
                    bmap->bitmap[0] == 0 &&
                    bmap->bitmap[1] == 0 &&
                    bmap->bitmap[2] == 0 &&
                    bmap->bitmap[3] == 0 &&
                    bmap->bitmap[4] == 0 &&
                    bmap->bitmap[5] == 0 &&
                    bmap->bitmap[6] == 0 &&
                    bmap->bitmap[7] == 0) {
                        key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
                        kprintf("Freeseg %016jx\n", (intmax_t)key);
                        bmap->class = 0;
                }
        }

        /*
         * chain->bref.check.freemap.bigmask (XXX)
         *
         * Setting bigmask is a hint to the allocation code that there might
         * be something allocatable.  We also set this in recovery... it
         * doesn't hurt and we might want to use the hint for other validation
         * operations later on.
         */
        if (modified)
                chain->bref.check.freemap.bigmask |= 1 << radix;

        hammer2_chain_unlock(chain);
done:
        hammer2_chain_unlock(parent);
}