/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>

#include "hammer2.h"

#define FREEMAP_DEBUG	0

struct hammer2_fiterate {
	hammer2_off_t	bpref;
	hammer2_off_t	bnext;
	int		loops;
	int		relaxed;
};

typedef struct hammer2_fiterate hammer2_fiterate_t;

static int hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			hammer2_blockref_t *bref, int radix,
			hammer2_fiterate_t *iter, hammer2_tid_t mtid);
static void hammer2_freemap_init(hammer2_dev_t *hmp,
			hammer2_key_t key, hammer2_chain_t *chain);
static int hammer2_bmap_alloc(hammer2_dev_t *hmp,
			hammer2_bmap_data_t *bmap, uint16_t class,
			int n, int sub_key, int radix, hammer2_key_t *basep);
static int hammer2_freemap_iterate(hammer2_chain_t **parentp,
			hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter);

/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 1GB zone.
 *
 * Rotate between eight possibilities.  Theoretically this means we have seven
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount-time.
 */
static
int
hammer2_freemap_reserve(hammer2_chain_t *chain, int radix)
{
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_off_t off;
	int index;
	int index_inc;
	size_t bytes;

	/*
	 * Physical allocation size.
	 */
	bytes = (size_t)1 << radix;

	/*
	 * Calculate block selection index 0..7 of current block.  If this
	 * is the first allocation of the block (versus a modification of an
	 * existing block), we use index 0, otherwise we use the next rotating
	 * index.
	 */
	if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
		index = 0;
	} else {
		off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
		      HAMMER2_SEGMASK;
		off = off / HAMMER2_PBUFSIZE;
		KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
			 off < HAMMER2_ZONE_FREEMAP_END);
		index = (int)(off - HAMMER2_ZONE_FREEMAP_00) /
			HAMMER2_ZONE_FREEMAP_INC;
		KKASSERT(index >= 0 && index < HAMMER2_NFREEMAPS);
		if (++index == HAMMER2_NFREEMAPS)
			index = 0;
	}

	/*
	 * Calculate the block offset of the reserved block.  This will
	 * point into the 4MB reserved area at the base of the appropriate
	 * 2GB zone, once added to the FREEMAP_x selection above.
	 */
	index_inc = index * HAMMER2_ZONE_FREEMAP_INC;

	switch(bref->keybits) {
	/* case HAMMER2_FREEMAP_LEVEL6_RADIX: not applicable */
	case HAMMER2_FREEMAP_LEVEL5_RADIX:	/* 4EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL5_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL5) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL4_RADIX:	/* 16PB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL3_RADIX:	/* 64TB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL2_RADIX:	/* 256GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL1_RADIX:	/* 1GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
		break;
	default:
		panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
		/* NOT REACHED */
		off = (hammer2_off_t)-1;
		break;
	}
	bref->data_off = off | radix;
#if FREEMAP_DEBUG
	kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		bref->type, bref->key, bref->keybits, bref->data_off);
#endif
	return (0);
}
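
#if 0
/*
 * Illustrative sketch only (not part of the driver, never compiled):
 * the copy rotation above reduced to its arithmetic.  Assuming the
 * HAMMER2_NFREEMAPS == 8 copies referenced in the code, a freemap block
 * currently stored in copy N of the reserved area is rewritten into copy
 * (N + 1) % 8, so up to seven older copies survive a crash.  The function
 * name is hypothetical.
 */
static int
example_next_freemap_copy(int current_index)
{
	/* 0..7 rotates to 1..7, then back to 0 */
	return ((current_index + 1) % HAMMER2_NFREEMAPS);
}
#endif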

/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.  Create missing
 * freemap infrastructure on-the-fly as needed (including marking initial
 * allocations using the iterator as allocated, instantiating new 2GB zones,
 * and dealing with the end-of-media edge case).
 *
 * bpref is only used as a heuristic to determine locality of reference.
 *
 * This function is a NOP if bytes is 0.
 */
int
hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes)
{
	hammer2_dev_t *hmp = chain->hmp;
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_chain_t *parent;
	hammer2_tid_t mtid;
	int radix;
	int error;
	unsigned int hindex;
	hammer2_fiterate_t iter;

	/*
	 * If allocating or downsizing to zero we just get rid of whatever
	 * data_off we had.
	 */
	if (bytes == 0) {
		chain->bref.data_off = 0;
		return 0;
	}

	KKASSERT(hmp->spmp);
	mtid = hammer2_trans_sub(hmp->spmp);

	/*
	 * Validate the allocation size.  It must be a power of 2.
	 *
	 * For now require that the caller be aware of the minimum
	 * allocation (1K).
	 */
	radix = hammer2_getradix(bytes);
	KKASSERT((size_t)1 << radix == bytes);

	if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		/*
		 * Freemap blocks themselves are assigned from the reserve
		 * area, not allocated from the freemap.
		 */
		error = hammer2_freemap_reserve(chain, radix);
		return error;
	}

	KKASSERT(bytes >= HAMMER2_ALLOC_MIN && bytes <= HAMMER2_ALLOC_MAX);

	/*
	 * Heuristic tracking index.  We would like one for each distinct
	 * bref type if possible.  heur_freemap[] has room for two classes
	 * for each type.  At a minimum we have to break-up our heuristic
	 * by device block sizes.
	 */
	hindex = HAMMER2_PBUFRADIX - HAMMER2_LBUFRADIX;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
	hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
	hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_SIZE);

	iter.bpref = hmp->heur_freemap[hindex];
	iter.relaxed = hmp->freemap_relaxed;

	/*
	 * Make sure bpref is in-bounds.  It's ok if bpref covers a zone's
	 * reserved area, the try code will iterate past it.
	 */
	if (iter.bpref > hmp->total_size)
		iter.bpref = hmp->total_size - 1;

	/*
	 * Iterate the freemap looking for free space before and after.
	 */
	parent = &hmp->fchain;
	hammer2_chain_ref(parent);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	error = HAMMER2_ERROR_EAGAIN;
	iter.bnext = iter.bpref;
	iter.loops = 0;

	while (error == HAMMER2_ERROR_EAGAIN) {
		error = hammer2_freemap_try_alloc(&parent, bref, radix,
						  &iter, mtid);
	}
	hmp->freemap_relaxed |= iter.relaxed;	/* heuristical, SMP race ok */
	hmp->heur_freemap[hindex] = iter.bnext;
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);

	return (error);
}
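
#if 0
/*
 * Illustrative sketch only (not part of the driver, never compiled):
 * hammer2_freemap_alloc() asserts that the byte count is an exact power
 * of 2 between HAMMER2_ALLOC_MIN (1K, per the comment above) and
 * HAMMER2_ALLOC_MAX, so a hypothetical caller would round its request
 * up first.  The function name is hypothetical.
 */
static int
example_round_and_alloc(hammer2_chain_t *chain, size_t nominal)
{
	size_t bytes = HAMMER2_ALLOC_MIN;

	while (bytes < nominal)		/* round up to a power of 2 */
		bytes <<= 1;
	KKASSERT(bytes <= HAMMER2_ALLOC_MAX);

	/* on success chain->bref.data_off holds (media offset | radix) */
	return (hammer2_freemap_alloc(chain, bytes));
}
#endif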

static int
hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			  hammer2_blockref_t *bref, int radix,
			  hammer2_fiterate_t *iter, hammer2_tid_t mtid)
{
	hammer2_dev_t *hmp = (*parentp)->hmp;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_key_t key_dummy;
	hammer2_chain_t *chain;
	hammer2_off_t key;
	size_t bytes;
	uint16_t class;
	int error;

	/*
	 * Calculate the number of bytes being allocated.
	 */
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | HAMMER2_PBUFRADIX;

	/*
	 * Lookup the level1 freemap chain, creating and initializing one
	 * if necessary.  Intermediate levels will be created automatically
	 * when necessary by hammer2_chain_create().
	 */
	key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = HAMMER2_FREEMAP_LEVEL0_SIZE;
	l1size = HAMMER2_FREEMAP_LEVEL1_SIZE;
	l1mask = l1size - 1;

	chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
				     &error,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);

	if (chain == NULL) {
		/*
		 * Create the missing leaf, be sure to initialize
		 * the auxiliary freemap tracking information in
		 * the bref.check.freemap structure.
		 */
#if 0
		kprintf("freemap create L1 @ %016jx bpref %016jx\n",
			key, iter->bpref);
#endif
		error = hammer2_chain_create(parentp, &chain, NULL, hmp->spmp,
				     HAMMER2_METH_DEFAULT,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE,
				     mtid, 0, 0);
		KKASSERT(error == 0);
		if (error == 0) {
			hammer2_chain_modify(chain, mtid, 0, 0);
			bzero(&chain->data->bmdata[0],
			      HAMMER2_FREEMAP_LEVELN_PSIZE);
			chain->bref.check.freemap.bigmask = (uint32_t)-1;
			chain->bref.check.freemap.avail = l1size;
			/* bref.methods should already be inherited */

			hammer2_freemap_init(hmp, key, chain);
		}
	} else if (chain->error) {
		/*
		 * Error during lookup.
		 */
		kprintf("hammer2_freemap_try_alloc: %016jx: error %s\n",
			(intmax_t)bref->data_off,
			hammer2_error_str(chain->error));
		error = HAMMER2_ERROR_EIO;
	} else if ((chain->bref.check.freemap.bigmask &
		   ((size_t)1 << radix)) == 0) {
		/*
		 * Already flagged as not having enough space
		 */
		error = HAMMER2_ERROR_ENOSPC;
	} else {
		/*
		 * Modify existing chain to setup for adjustment.
		 */
		hammer2_chain_modify(chain, mtid, 0, 0);
	}

	/*
	 * Scan 4MB entries.
	 */
	if (error == 0) {
		hammer2_bmap_data_t *bmap;
		hammer2_key_t base_key;
		int count;
		int start;
		int n;

		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		start = (int)((iter->bnext - key) >>
			      HAMMER2_FREEMAP_LEVEL0_RADIX);
		KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
		hammer2_chain_modify(chain, mtid, 0, 0);

		error = HAMMER2_ERROR_ENOSPC;
		for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
			int availchk;

			if (start + count >= HAMMER2_FREEMAP_COUNT &&
			    start - count < 0) {
				break;
			}

			/*
			 * Calculate bmap pointer from the starting index
			 * forwards.
			 *
			 * NOTE: bmap pointer is invalid if n >= FREEMAP_COUNT.
			 */
			n = start + count;
			bmap = &chain->data->bmdata[n];

			if (n >= HAMMER2_FREEMAP_COUNT) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
				   (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				availchk = 1;
			} else {
				availchk = 0;
			}

			/*
			 * Try to allocate from a matching freemap class
			 * superblock.  If we are in relaxed mode we allocate
			 * from any freemap class superblock.
			 */
			if (availchk &&
			    (bmap->class == 0 || bmap->class == class ||
			     iter->relaxed)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap,
							   class, n,
							   (int)bref->key,
							   radix,
							   &base_key);
				if (error != HAMMER2_ERROR_ENOSPC) {
					key = base_key;
					break;
				}
			}

			/*
			 * Calculate bmap pointer from the starting index
			 * backwards (locality).
			 *
			 * Must recalculate after potentially having called
			 * hammer2_bmap_alloc() above in case chain was
			 * reallocated.
			 *
			 * NOTE: bmap pointer is invalid if n < 0.
			 */
			n = start - count;
			bmap = &chain->data->bmdata[n];
			if (n < 0) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
				   (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				availchk = 1;
			} else {
				availchk = 0;
			}

			/*
			 * Try to allocate from a matching freemap class
			 * superblock.  If we are in relaxed mode we allocate
			 * from any freemap class superblock.
			 */
			if (availchk &&
			    (bmap->class == 0 || bmap->class == class ||
			     iter->relaxed)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap,
							   class, n,
							   (int)bref->key,
							   radix,
							   &base_key);
				if (error != HAMMER2_ERROR_ENOSPC) {
					key = base_key;
					break;
				}
			}
		}

		/*
		 * We only know for sure that we can clear the bitmap bit
		 * if we scanned the entire array (start == 0) in relaxed
		 * mode.
		 */
		if (error == HAMMER2_ERROR_ENOSPC &&
		    start == 0 &&
		    iter->relaxed)
		{
			chain->bref.check.freemap.bigmask &=
				(uint32_t)~((size_t)1 << radix);
		}
		/* XXX also scan down from original count */
	}

	if (error == 0) {
		/*
		 * Assert validity.  Must be beyond the static allocator used
		 * by newfs_hammer2 (and thus also beyond the aux area),
		 * not go past the volume size, and must not be in the
		 * reserved segment area for a zone.
		 */
		KKASSERT(key >= hmp->voldata.allocator_beg &&
			 key + bytes <= hmp->total_size);
		KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
		bref->data_off = key | radix;

		/*
		 * Record dedupability.  The dedup bits are cleared
		 * when bulkfree transitions the freemap from 11->10,
		 * and asserted to be clear on the 10->00 transition.
		 *
		 * We must record the bitmask with the chain locked
		 * at the time we set the allocation bits to avoid
		 * racing a bulkfree.
		 */
		if (bref->type == HAMMER2_BREF_TYPE_DATA)
			hammer2_io_dedup_set(hmp, bref);
#if 0
		kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
			chain,
			bref->key, bref->data_off, chain->bref.data_off);
#endif
	} else if (error == HAMMER2_ERROR_ENOSPC) {
		/*
		 * Return EAGAIN with next iteration in iter->bnext, or
		 * return ENOSPC if the allocation map has been exhausted.
		 */
		error = hammer2_freemap_iterate(parentp, &chain, iter);
	}

	/*
	 * Cleanup
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	return (error);
}

/*
 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
 *
 * If the linear iterator is mid-block we use it directly (the bitmap should
 * already be marked allocated), otherwise we search for a block in the
 * bitmap that fits the allocation request.
 *
 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
 * to fully allocated and adjusts the linear allocator to allow the
 * remaining space to be allocated.
 *
 * sub_key is the lower 32 bits of the chain->bref.key for the chain whose
 * bref is being allocated.  If the radix represents an allocation >= 16KB
 * (aka HAMMER2_FREEMAP_BLOCK_RADIX) we try to use this key to select the
 * blocks directly out of the bmap.
 */
static
int
hammer2_bmap_alloc(hammer2_dev_t *hmp, hammer2_bmap_data_t *bmap,
		   uint16_t class, int n, int sub_key,
		   int radix, hammer2_key_t *basep)
{
	size_t size;
	size_t bgsize;
	int bmradix;
	hammer2_bitmap_t bmmask;
	int offset;
	int i;
	int j;

	/*
	 * Take into account 2-bits per block when calculating bmradix.
	 */
	size = (size_t)1 << radix;

	if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
		bmradix = 2;
		/* (16K) 2 bits per allocation block */
	} else {
		bmradix = (hammer2_bitmap_t)2 <<
			  (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
		/* (32K-64K) 4, 8 bits per allocation block */
	}

	/*
	 * Use the linear iterator to pack small allocations, otherwise
	 * fall-back to finding a free 16KB chunk.  The linear iterator
	 * is only valid when *NOT* on a freemap chunking boundary (16KB).
	 * If it is the bitmap must be scanned.  It can become invalid
	 * once we pack to the boundary.  We adjust it after a bitmap
	 * allocation only for sub-16KB allocations (so the perfectly good
	 * previous value can still be used for fragments when 16KB+
	 * allocations are made in between fragmentary allocations).
	 *
	 * Beware of hardware artifacts when bmradix == 64 (intermediate
	 * result can wind up being '1' instead of '0' if hardware masks
	 * bit-count & 63).
	 *
	 * NOTE: j needs to be even in the j= calculation.  As an artifact
	 *	 of the /2 division, our bitmask has to clear bit 0.
	 *
	 * NOTE: TODO this can leave little unallocatable fragments lying
	 *	 around.
	 */
	if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
	    HAMMER2_FREEMAP_BLOCK_SIZE &&
	    (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
	    bmap->linear < HAMMER2_SEGSIZE) {
		/*
		 * Use linear iterator if it is not block-aligned to avoid
		 * wasting space.
		 *
		 * Calculate the bitmapq[] index (i) and calculate the
		 * shift count within the 64-bit bitmapq[] entry.
		 *
		 * The freemap block size is 16KB, but each bitmap
		 * entry is two bits so use a little trick to get
		 * a (j) shift of 0, 2, 4, ... 62 in 16KB chunks.
		 */
		KKASSERT(bmap->linear >= 0 &&
			 bmap->linear + size <= HAMMER2_SEGSIZE &&
			 (bmap->linear & (HAMMER2_ALLOC_MIN - 1)) == 0);
		offset = bmap->linear;
		i = offset / (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS);
		j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 62;
		bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			 HAMMER2_BMAP_ALLONES :
			 ((hammer2_bitmap_t)1 << bmradix) - 1;
		bmmask <<= j;
		bmap->linear = offset + size;
	} else {
		/*
		 * Try to index a starting point based on sub_key.  This
		 * attempts to restore sequential block ordering on-disk
		 * whenever possible, even if data is committed out of
		 * order.
		 *
		 * i - Index bitmapq[], full data range represented is
		 *     HAMMER2_BMAP_SIZE.
		 *
		 * j - Index within bitmapq[i], full data range represented is
		 *     HAMMER2_BMAP_INDEX_SIZE.
		 *
		 * WARNING!
		 */
		i = -1;
		j = -1;

		switch(class >> 8) {
		case HAMMER2_BREF_TYPE_DATA:
			if (radix >= HAMMER2_FREEMAP_BLOCK_RADIX) {
				i = (sub_key & HAMMER2_BMAP_MASK) /
				    (HAMMER2_BMAP_SIZE / HAMMER2_BMAP_ELEMENTS);
				j = (sub_key & HAMMER2_BMAP_INDEX_MASK) /
				    (HAMMER2_BMAP_INDEX_SIZE /
				     HAMMER2_BMAP_BLOCKS_PER_ELEMENT);
				j = j * 2;
			}
			break;
		case HAMMER2_BREF_TYPE_INODE:
			break;
		default:
			break;
		}
		if (i >= 0) {
			KKASSERT(i < HAMMER2_BMAP_ELEMENTS &&
				 j < 2 * HAMMER2_BMAP_BLOCKS_PER_ELEMENT);
			KKASSERT(j + bmradix <= HAMMER2_BMAP_BITS_PER_ELEMENT);
			bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
				 HAMMER2_BMAP_ALLONES :
				 ((hammer2_bitmap_t)1 << bmradix) - 1;
			bmmask <<= j;

			if ((bmap->bitmapq[i] & bmmask) == 0)
				goto success;
		}

		/*
		 * General element scan.
		 *
		 * WARNING: (j) is iterating a bit index (by 2's)
		 */
		for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) {
			bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
				 HAMMER2_BMAP_ALLONES :
				 ((hammer2_bitmap_t)1 << bmradix) - 1;
			for (j = 0;
			     j < HAMMER2_BMAP_BITS_PER_ELEMENT;
			     j += bmradix) {
				if ((bmap->bitmapq[i] & bmmask) == 0)
					goto success;
				bmmask <<= bmradix;
			}
		}
		/*fragments might remain*/
		/*KKASSERT(bmap->avail == 0);*/
		return (HAMMER2_ERROR_ENOSPC);
success:
		offset = i * (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS) +
			 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
		if (size & HAMMER2_FREEMAP_BLOCK_MASK)
			bmap->linear = offset + size;
	}

	/* 8 x (64/2) -> 256 x 16K -> 4MB */
	KKASSERT(i >= 0 && i < HAMMER2_BMAP_ELEMENTS);

	/*
	 * Optimize the buffer cache to avoid unnecessary read-before-write
	 * operations.
	 *
	 * The device block size could be larger than the allocation size
	 * so the actual bitmap test is somewhat more involved.  We have
	 * to use a compatible buffer size for this operation.
	 */
	if ((bmap->bitmapq[i] & bmmask) == 0 &&
	    HAMMER2_PBUFSIZE != size) {
		size_t psize = HAMMER2_PBUFSIZE;
		hammer2_off_t pmask = (hammer2_off_t)psize - 1;
		int pbmradix = (hammer2_bitmap_t)2 <<
			       (HAMMER2_PBUFRADIX -
				HAMMER2_FREEMAP_BLOCK_RADIX);
		hammer2_bitmap_t pbmmask;
		int pradix = hammer2_getradix(psize);

		pbmmask = (pbmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			HAMMER2_BMAP_ALLONES :
			((hammer2_bitmap_t)1 << pbmradix) - 1;
		while ((pbmmask & bmmask) == 0)
			pbmmask <<= pbmradix;

#if 0
		kprintf("%016jx mask %016jx %016jx %016jx (%zd/%zd)\n",
			*basep + offset, bmap->bitmapq[i],
			pbmmask, bmmask, size, psize);
#endif

		if ((bmap->bitmapq[i] & pbmmask) == 0) {
			hammer2_io_t *dio;

			hammer2_io_newnz(hmp, class >> 8,
					 (*basep + (offset & ~pmask)) |
					 pradix, psize, &dio);
			hammer2_io_putblk(&dio);
		}
	}

#if 0
	/*
	 * When initializing a new inode segment also attempt to initialize
	 * an adjacent segment.  Be careful not to index beyond the array
	 * bounds.
	 *
	 * We do this to try to localize inode accesses to improve
	 * directory scan rates.  XXX doesn't improve scan rates.
	 */
	if (size == HAMMER2_INODE_BYTES) {
		if (n & 1) {
			if (bmap[-1].radix == 0 && bmap[-1].avail)
				bmap[-1].radix = radix;
		} else {
			if (bmap[1].radix == 0 && bmap[1].avail)
				bmap[1].radix = radix;
		}
	}
#endif
	/*
	 * Calculate the bitmap-granular change in bgsize for the volume
	 * header.  We cannot use the fine-grained change here because
	 * the bulkfree code can't undo it.  If the bitmap element is already
	 * marked allocated it has already been accounted for.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		if (bmap->bitmapq[i] & bmmask)
			bgsize = 0;
		else
			bgsize = HAMMER2_FREEMAP_BLOCK_SIZE;
	} else {
		bgsize = size;
	}

	/*
	 * Adjust the bitmap, set the class (it might have been 0),
	 * and available bytes, update the allocation offset (*basep)
	 * from the L0 base to the actual offset.
	 *
	 * Do not override the class if doing a relaxed class allocation.
	 *
	 * avail must reflect the bitmap-granular availability.  The allocator
	 * tests will also check the linear iterator.
	 */
	bmap->bitmapq[i] |= bmmask;
	if (bmap->class == 0)
		bmap->class = class;
	bmap->avail -= bgsize;
	*basep += offset;

	/*
	 * Adjust the volume header's allocator_free parameter.  This
	 * parameter has to be fixed up by bulkfree which has no way to
	 * figure out sub-16K chunking, so it must be adjusted by the
	 * bitmap-granular size.
	 */
	if (bgsize) {
		hammer2_voldata_lock(hmp);
		hammer2_voldata_modify(hmp);
		hmp->voldata.allocator_free -= bgsize;
		hammer2_voldata_unlock(hmp);
	}

	return(0);
}
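
#if 0
/*
 * Illustrative sketch only (not part of the driver, never compiled):
 * the (i, j, bmmask) arithmetic used above, assuming the layout described
 * by the code (a 4MB bmap, HAMMER2_BMAP_ELEMENTS == 8 x 64-bit bitmapq[]
 * elements, 2 bitmap bits per 16KB block, so each element covers 512KB).
 * For example, offset 576KB in a bmap lands in bitmapq[1] at bit shift 8.
 * The function name is hypothetical.
 */
static void
example_bmap_index(int offset, int bmradix,
		   int *ip, int *jp, hammer2_bitmap_t *maskp)
{
	/* which 64-bit element (512KB per element) */
	*ip = offset / (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS);
	/* even bit shift 0, 2, ... 62 within that element */
	*jp = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 62;
	/* bmradix bits worth of 1's shifted into place */
	*maskp = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
		 HAMMER2_BMAP_ALLONES :
		 (((hammer2_bitmap_t)1 << bmradix) - 1) << *jp;
}
#endif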

/*
 * Initialize a freemap for the storage area (in bytes) that begins at (key).
 */
static
void
hammer2_freemap_init(hammer2_dev_t *hmp, hammer2_key_t key,
		     hammer2_chain_t *chain)
{
	hammer2_off_t lokey;
	hammer2_off_t hikey;
	hammer2_bmap_data_t *bmap;
	int count;

	/*
	 * Calculate the portion of the 1GB map that should be initialized
	 * as free.  Portions below or after will be initialized as allocated.
	 * SEGMASK-align the areas so we don't have to worry about sub-scans
	 * or endianness when using memset.
	 *
	 * WARNING! It is possible for lokey to be larger than hikey if the
	 *	    entire 2GB segment is within the static allocation.
	 */
	/*
	 * (1) Ensure that all statically allocated space from newfs_hammer2
	 *     is marked allocated, and take it up to the level1 base for
	 *     this key.
	 */
	lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
		~HAMMER2_SEGMASK64;
	if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX))
		lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX);

	/*
	 * (2) Ensure that the reserved area is marked allocated (typically
	 *     the first 4MB of each 2GB area being represented).  Since
	 *     each LEAF represents 1GB of storage and the zone is 2GB, we
	 *     have to adjust lokey upward every other LEAF sequentially.
	 */
	if (lokey < H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64)
		lokey = H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64;

	/*
	 * (3) Ensure that any trailing space at the end-of-volume is marked
	 *     allocated.
	 */
	hikey = key + HAMMER2_FREEMAP_LEVEL1_SIZE;
	if (hikey > hmp->total_size) {
		hikey = hmp->total_size & ~HAMMER2_SEGMASK64;
	}

	/*
	 * Heuristic highest possible value
	 */
	chain->bref.check.freemap.avail = HAMMER2_FREEMAP_LEVEL1_SIZE;
	bmap = &chain->data->bmdata[0];

	/*
	 * Initialize bitmap (bzero'd by caller)
	 */
	for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
		if (key < lokey || key >= hikey) {
			memset(bmap->bitmapq, -1,
			       sizeof(bmap->bitmapq));
			bmap->avail = 0;
			bmap->linear = HAMMER2_SEGSIZE;
			chain->bref.check.freemap.avail -=
				HAMMER2_FREEMAP_LEVEL0_SIZE;
		} else {
			bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
		}
		key += HAMMER2_FREEMAP_LEVEL0_SIZE;
		++bmap;
	}
}

/*
 * The current Level 1 freemap has been exhausted; iterate to the next
 * one.  Return ENOSPC if no freemaps remain.
 *
 * At least two loops are required.  If we are not in relaxed mode and
 * we run out of storage we enter relaxed mode and do a third loop.
 * The relaxed mode is recorded back in the hmp so once we enter the mode
 * we remain relaxed until stuff begins to get freed and only do 2 loops.
 *
 * XXX this should rotate back to the beginning to handle freed-up space
 * XXX or use intermediate entries to locate free space. TODO
 */
static int
hammer2_freemap_iterate(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter)
{
	hammer2_dev_t *hmp = (*parentp)->hmp;

	iter->bnext &= ~HAMMER2_FREEMAP_LEVEL1_MASK;
	iter->bnext += HAMMER2_FREEMAP_LEVEL1_SIZE;
	if (iter->bnext >= hmp->total_size) {
		iter->bnext = 0;
		if (++iter->loops >= 2) {
			if (iter->relaxed == 0)
				iter->relaxed = 1;
			else
				return (HAMMER2_ERROR_ENOSPC);
		}
	}
	return(HAMMER2_ERROR_EAGAIN);
}

/*
 * Adjust the bit-pattern for data in the freemap bitmap according to
 * (how).  This code is called from on-mount recovery to fixup (mark
 * as allocated) blocks whose freemap updates might not have been committed
 * in the last crash and is used by the bulk freemap scan to stage frees.
 *
 * WARNING! Cannot be called with an empty-data bref (radix == 0).
 *
 * XXX currently disabled when how == 0 (the normal real-time case).  At
 * the moment we depend on the bulk freescan to actually free blocks.  It
 * will still call this routine with a non-zero how to stage possible frees
 * and to do the actual free.
 */
void
hammer2_freemap_adjust(hammer2_dev_t *hmp, hammer2_blockref_t *bref,
		       int how)
{
	hammer2_off_t data_off = bref->data_off;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_bmap_data_t *bmap;
	hammer2_key_t key;
	hammer2_key_t key_dummy;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_tid_t mtid;
	hammer2_bitmap_t *bitmap;
	const hammer2_bitmap_t bmmask00 = 0;
	//hammer2_bitmap_t bmmask01;
	//hammer2_bitmap_t bmmask10;
	hammer2_bitmap_t bmmask11;
	size_t bytes;
	uint16_t class;
	int radix;
	int start;
	int count;
	int modified = 0;
	int error;
	size_t bgsize = 0;

	KKASSERT(how == HAMMER2_FREEMAP_DORECOVER);

	KKASSERT(hmp->spmp);
	mtid = hammer2_trans_sub(hmp->spmp);

	radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
	KKASSERT(radix != 0);
	data_off &= ~HAMMER2_OFF_MASK_RADIX;
	KKASSERT(radix <= HAMMER2_RADIX_MAX);

	if (radix)
		bytes = (size_t)1 << radix;
	else
		bytes = 0;
	class = (bref->type << 8) | HAMMER2_PBUFRADIX;

	/*
	 * We can't adjust the freemap for data allocations made by
	 * newfs_hammer2.
	 */
	if (data_off < hmp->voldata.allocator_beg)
		return;

	KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);

	/*
	 * Lookup the level1 freemap chain.  The chain must exist.
	 */
	key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1size = HAMMER2_FREEMAP_LEVEL1_SIZE;
	l1mask = l1size - 1;

	parent = &hmp->fchain;
	hammer2_chain_ref(parent);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);

	chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
				     &error,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);

	/*
	 * Stop early if we are trying to free something but no leaf exists.
	 */
	if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
		kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
			(intmax_t)bref->data_off);
		goto done;
	}
	if (chain->error) {
		kprintf("hammer2_freemap_adjust: %016jx: error %s\n",
			(intmax_t)bref->data_off,
			hammer2_error_str(chain->error));
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		goto done;
	}

	/*
	 * Create any missing leaf(s) if we are doing a recovery (marking
	 * the block(s) as being allocated instead of being freed).  Be sure
	 * to initialize the auxiliary freemap tracking info in the
	 * bref.check.freemap structure.
	 */
	if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
		error = hammer2_chain_create(&parent, &chain, NULL, hmp->spmp,
				     HAMMER2_METH_DEFAULT,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE,
				     mtid, 0, 0);

		if (hammer2_debug & 0x0040) {
			kprintf("fixup create chain %p %016jx:%d\n",
				chain, chain->bref.key, chain->bref.keybits);
		}

		if (error == 0) {
			error = hammer2_chain_modify(chain, mtid, 0, 0);
			KKASSERT(error == 0);
			bzero(&chain->data->bmdata[0],
			      HAMMER2_FREEMAP_LEVELN_PSIZE);
			chain->bref.check.freemap.bigmask = (uint32_t)-1;
			chain->bref.check.freemap.avail = l1size;
			/* bref.methods should already be inherited */

			hammer2_freemap_init(hmp, key, chain);
		}
		/* XXX handle error */
	}

#if FREEMAP_DEBUG
	kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		chain->bref.type, chain->bref.key,
		chain->bref.keybits, chain->bref.data_off);
#endif

	/*
	 * Calculate the bitmask (runs in 2-bit pairs).
	 */
	start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
	//bmmask01 = (hammer2_bitmap_t)1 << start;
	//bmmask10 = (hammer2_bitmap_t)2 << start;
	bmmask11 = (hammer2_bitmap_t)3 << start;

	/*
	 * Fixup the bitmap.  Partial blocks cannot be fully freed unless
	 * a bulk scan is able to roll them up.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		count = 1;
#if 0
		if (how == HAMMER2_FREEMAP_DOREALFREE)
			how = HAMMER2_FREEMAP_DOMAYFREE;
#endif
	} else {
		count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
	}

	/*
	 * [re]load the bmap and bitmap pointers.  Each bmap entry covers
	 * a 4MB swath.  The bmap itself (LEVEL1) covers 2GB.
	 *
	 * Be sure to reset the linear iterator to ensure that the adjustment
	 * is not ignored.
	 */
again:
	bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
				    (HAMMER2_FREEMAP_COUNT - 1)];
	bitmap = &bmap->bitmapq[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];

	if (modified)
		bmap->linear = 0;

	while (count) {
		KKASSERT(bmmask11);
		if (how == HAMMER2_FREEMAP_DORECOVER) {
			/*
			 * Recovery request, mark as allocated.
			 */
			if ((*bitmap & bmmask11) != bmmask11) {
				if (modified == 0) {
					hammer2_chain_modify(chain, mtid, 0, 0);
					modified = 1;
					goto again;
				}
				if ((*bitmap & bmmask11) == bmmask00) {
					bmap->avail -=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					bgsize += HAMMER2_FREEMAP_BLOCK_SIZE;
				}
				if (bmap->class == 0)
					bmap->class = class;
				*bitmap |= bmmask11;
				if (hammer2_debug & 0x0040) {
					kprintf("hammer2_freemap_adjust: "
						"fixup type=%02x "
						"block=%016jx/%zd\n",
						bref->type, data_off, bytes);
				}
			} else {
				/*
				kprintf("hammer2_freemap_adjust: good "
					"type=%02x block=%016jx/%zd\n",
					bref->type, data_off, bytes);
				*/
			}
		}
#if 0
		/*
		 * XXX this stuff doesn't work, avail is miscalculated and
		 * code 10 means something else now.
		 */
		else if ((*bitmap & bmmask11) == bmmask11) {
			/*
			 * Mayfree/Realfree request and bitmap is currently
			 * marked as being fully allocated.
			 */
			if (!modified) {
				hammer2_chain_modify(chain, 0);
				modified = 1;
				goto again;
			}
			if (how == HAMMER2_FREEMAP_DOREALFREE)
				*bitmap &= ~bmmask11;
			else
				*bitmap = (*bitmap & ~bmmask11) | bmmask10;
		} else if ((*bitmap & bmmask11) == bmmask10) {
			/*
			 * Mayfree/Realfree request and bitmap is currently
			 * marked as being possibly freeable.
			 */
			if (how == HAMMER2_FREEMAP_DOREALFREE) {
				if (!modified) {
					hammer2_chain_modify(chain, 0);
					modified = 1;
					goto again;
				}
				*bitmap &= ~bmmask11;
			}
		} else {
			/*
			 * 01 - Not implemented, currently illegal state
			 * 00 - Not allocated at all, illegal free.
			 */
			panic("hammer2_freemap_adjust: "
			      "Illegal state %08x(%08x)",
			      *bitmap, *bitmap & bmmask11);
		}
#endif
		--count;
		//bmmask01 <<= 2;
		//bmmask10 <<= 2;
		bmmask11 <<= 2;
	}

#if 0
#if HAMMER2_BMAP_ELEMENTS != 8
#error "hammer2_freemap.c: HAMMER2_BMAP_ELEMENTS expected to be 8"
#endif
	if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
		bmap->avail += 1 << radix;
		KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
		if (bmap->avail == HAMMER2_SEGSIZE &&
		    bmap->bitmapq[0] == 0 &&
		    bmap->bitmapq[1] == 0 &&
		    bmap->bitmapq[2] == 0 &&
		    bmap->bitmapq[3] == 0 &&
		    bmap->bitmapq[4] == 0 &&
		    bmap->bitmapq[5] == 0 &&
		    bmap->bitmapq[6] == 0 &&
		    bmap->bitmapq[7] == 0) {
			key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
			kprintf("Freeseg %016jx\n", (intmax_t)key);
			bmap->class = 0;
		}
	}
#endif

	/*
	 * chain->bref.check.freemap.bigmask (XXX)
	 *
	 * Setting bigmask is a hint to the allocation code that there might
	 * be something allocatable.  We also set this in recovery... it
	 * doesn't hurt and we might want to use the hint for other validation
	 * operations later on.
	 *
	 * We could calculate the largest possible allocation and set the
	 * radixes that could fit, but it's easier just to set bigmask to -1.
	 */
	if (modified) {
		chain->bref.check.freemap.bigmask = -1;
		hmp->freemap_relaxed = 0;	/* reset heuristic */
	}

	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);
done:
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);

	if (bgsize) {
		hammer2_voldata_lock(hmp);
		hammer2_voldata_modify(hmp);
		hmp->voldata.allocator_free -= bgsize;
		hammer2_voldata_unlock(hmp);
	}
}

/*
 * Validate the freemap, in three stages.
 *
 * stage-1	ALLOCATED     -> POSSIBLY FREE
 *		POSSIBLY FREE -> POSSIBLY FREE (type corrected)
 *
 *	This transitions bitmap entries from ALLOCATED to POSSIBLY FREE.
 *	The POSSIBLY FREE state does not mean that a block is actually free
 *	and may be transitioned back to ALLOCATED in stage-2.
 *
 *	This is typically done during normal filesystem operations when
 *	something is deleted or a block is replaced.
 *
 *	This is done by bulkfree in-bulk after a memory-bounded meta-data
 *	scan to try to determine what might be freeable.
 *
 *	This can be done unconditionally through a freemap scan when the
 *	intention is to brute-force recover the proper state of the freemap.
 *
 * stage-2	POSSIBLY FREE -> ALLOCATED	(scan metadata topology)
 *
 *	This is done by bulkfree during a meta-data scan to ensure that
 *	all blocks still actually allocated by the filesystem are marked
 *	as such.
 *
 *	NOTE! Live filesystem transitions to POSSIBLY FREE can occur while
 *	      the bulkfree stage-2 and stage-3 are running.  The live
 *	      filesystem will use the alternative POSSIBLY FREE type (2) to
 *	      prevent stage-3 from improperly transitioning unvetted
 *	      possibly-free blocks to FREE.
 *
 * stage-3	POSSIBLY FREE (type 1) -> FREE	(scan freemap)
 *
 *	This is done by bulkfree to finalize POSSIBLY FREE states.