/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/fcntl.h>
41 #include <sys/namei.h>
42 #include <sys/mount.h>
43 #include <sys/vnode.h>
44 #include <sys/mountctl.h>
48 #define FREEMAP_DEBUG 0
50 struct hammer2_fiterate
{
56 typedef struct hammer2_fiterate hammer2_fiterate_t
;
58 static int hammer2_freemap_try_alloc(hammer2_chain_t
**parentp
,
59 hammer2_blockref_t
*bref
, int radix
,
60 hammer2_fiterate_t
*iter
, hammer2_tid_t mtid
);
61 static void hammer2_freemap_init(hammer2_dev_t
*hmp
,
62 hammer2_key_t key
, hammer2_chain_t
*chain
);
63 static int hammer2_bmap_alloc(hammer2_dev_t
*hmp
,
64 hammer2_bmap_data_t
*bmap
, uint16_t class,
65 int n
, int sub_key
, int radix
, hammer2_key_t
*basep
);
66 static int hammer2_freemap_iterate(hammer2_chain_t
**parentp
,
67 hammer2_chain_t
**chainp
,
68 hammer2_fiterate_t
*iter
);
72 hammer2_freemapradix(int radix
)
/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 2GB zone.
 *
 * Rotate between four possibilities.  Theoretically this means we have three
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount time.
 */
/*
 * H2FMBASE()  - mask (key) down to the base of the naturally-aligned
 *		 region of size (1 << radix).
 * H2FMSHIFT() - the size, in bytes, of a region of the given radix.
 */
#define H2FMBASE(key, radix)	((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
#define H2FMSHIFT(radix)	((hammer2_off_t)1 << (radix))
92 hammer2_freemap_reserve(hammer2_chain_t
*chain
, int radix
)
94 hammer2_blockref_t
*bref
= &chain
->bref
;
101 * Physical allocation size.
103 bytes
= (size_t)1 << radix
;
106 * Calculate block selection index 0..7 of current block. If this
107 * is the first allocation of the block (verses a modification of an
108 * existing block), we use index 0, otherwise we use the next rotating
111 if ((bref
->data_off
& ~HAMMER2_OFF_MASK_RADIX
) == 0) {
114 off
= bref
->data_off
& ~HAMMER2_OFF_MASK_RADIX
&
115 (((hammer2_off_t
)1 <<
116 HAMMER2_FREEMAP_LEVEL1_RADIX
) - 1);
117 off
= off
/ HAMMER2_PBUFSIZE
;
118 KKASSERT(off
>= HAMMER2_ZONE_FREEMAP_00
&&
119 off
< HAMMER2_ZONE_FREEMAP_END
);
120 index
= (int)(off
- HAMMER2_ZONE_FREEMAP_00
) /
121 HAMMER2_ZONE_FREEMAP_INC
;
122 KKASSERT(index
>= 0 && index
< HAMMER2_NFREEMAPS
);
123 if (++index
== HAMMER2_NFREEMAPS
)
128 * Calculate the block offset of the reserved block. This will
129 * point into the 4MB reserved area at the base of the appropriate
130 * 2GB zone, once added to the FREEMAP_x selection above.
132 index_inc
= index
* HAMMER2_ZONE_FREEMAP_INC
;
134 switch(bref
->keybits
) {
135 /* case HAMMER2_FREEMAP_LEVEL6_RADIX: not applicable */
136 case HAMMER2_FREEMAP_LEVEL5_RADIX
: /* 2EB */
137 KKASSERT(bref
->type
== HAMMER2_BREF_TYPE_FREEMAP_NODE
);
138 KKASSERT(bytes
== HAMMER2_FREEMAP_LEVELN_PSIZE
);
139 off
= H2FMBASE(bref
->key
, HAMMER2_FREEMAP_LEVEL5_RADIX
) +
140 (index_inc
+ HAMMER2_ZONE_FREEMAP_00
+
141 HAMMER2_ZONEFM_LEVEL5
) * HAMMER2_PBUFSIZE
;
143 case HAMMER2_FREEMAP_LEVEL4_RADIX
: /* 2EB */
144 KKASSERT(bref
->type
== HAMMER2_BREF_TYPE_FREEMAP_NODE
);
145 KKASSERT(bytes
== HAMMER2_FREEMAP_LEVELN_PSIZE
);
146 off
= H2FMBASE(bref
->key
, HAMMER2_FREEMAP_LEVEL4_RADIX
) +
147 (index_inc
+ HAMMER2_ZONE_FREEMAP_00
+
148 HAMMER2_ZONEFM_LEVEL4
) * HAMMER2_PBUFSIZE
;
150 case HAMMER2_FREEMAP_LEVEL3_RADIX
: /* 2PB */
151 KKASSERT(bref
->type
== HAMMER2_BREF_TYPE_FREEMAP_NODE
);
152 KKASSERT(bytes
== HAMMER2_FREEMAP_LEVELN_PSIZE
);
153 off
= H2FMBASE(bref
->key
, HAMMER2_FREEMAP_LEVEL3_RADIX
) +
154 (index_inc
+ HAMMER2_ZONE_FREEMAP_00
+
155 HAMMER2_ZONEFM_LEVEL3
) * HAMMER2_PBUFSIZE
;
157 case HAMMER2_FREEMAP_LEVEL2_RADIX
: /* 2TB */
158 KKASSERT(bref
->type
== HAMMER2_BREF_TYPE_FREEMAP_NODE
);
159 KKASSERT(bytes
== HAMMER2_FREEMAP_LEVELN_PSIZE
);
160 off
= H2FMBASE(bref
->key
, HAMMER2_FREEMAP_LEVEL2_RADIX
) +
161 (index_inc
+ HAMMER2_ZONE_FREEMAP_00
+
162 HAMMER2_ZONEFM_LEVEL2
) * HAMMER2_PBUFSIZE
;
164 case HAMMER2_FREEMAP_LEVEL1_RADIX
: /* 2GB */
165 KKASSERT(bref
->type
== HAMMER2_BREF_TYPE_FREEMAP_LEAF
);
166 KKASSERT(bytes
== HAMMER2_FREEMAP_LEVELN_PSIZE
);
167 off
= H2FMBASE(bref
->key
, HAMMER2_FREEMAP_LEVEL1_RADIX
) +
168 (index_inc
+ HAMMER2_ZONE_FREEMAP_00
+
169 HAMMER2_ZONEFM_LEVEL1
) * HAMMER2_PBUFSIZE
;
172 panic("freemap: bad radix(2) %p %d\n", bref
, bref
->keybits
);
174 off
= (hammer2_off_t
)-1;
177 bref
->data_off
= off
| radix
;
179 kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
180 bref
->type
, bref
->key
, bref
->keybits
, bref
->data_off
);
/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.  Create missing
 * freemap infrastructure on-the-fly as needed (including marking initial
 * allocations using the iterator as allocated, instantiating new 2GB zones,
 * and dealing with the end-of-media edge case).
 *
 * ip and bpref are only used as a heuristic to determine locality of
 * reference.  bref->key may also be used heuristically.
 *
 * This function is a NOP if bytes is 0.
 */
199 hammer2_freemap_alloc(hammer2_chain_t
*chain
, size_t bytes
)
201 hammer2_dev_t
*hmp
= chain
->hmp
;
202 hammer2_blockref_t
*bref
= &chain
->bref
;
203 hammer2_chain_t
*parent
;
208 hammer2_fiterate_t iter
;
211 * If allocating or downsizing to zero we just get rid of whatever
215 chain
->bref
.data_off
= 0;
219 mtid
= hammer2_trans_sub(hmp
->spmp
);
222 * Validate the allocation size. It must be a power of 2.
224 * For now require that the caller be aware of the minimum
227 radix
= hammer2_getradix(bytes
);
228 KKASSERT((size_t)1 << radix
== bytes
);
230 if (bref
->type
== HAMMER2_BREF_TYPE_FREEMAP_NODE
||
231 bref
->type
== HAMMER2_BREF_TYPE_FREEMAP_LEAF
) {
233 * Freemap blocks themselves are assigned from the reserve
234 * area, not allocated from the freemap.
236 error
= hammer2_freemap_reserve(chain
, radix
);
237 KKASSERT(error
== 0);
242 KKASSERT(bytes
>= HAMMER2_ALLOC_MIN
&& bytes
<= HAMMER2_ALLOC_MAX
);
245 * Calculate the starting point for our allocation search.
247 * Each freemap leaf is dedicated to a specific freemap_radix.
248 * The freemap_radix can be more fine-grained than the device buffer
249 * radix which results in inodes being grouped together in their
250 * own segment, terminal-data (16K or less) and initial indirect
251 * block being grouped together, and then full-indirect and full-data
252 * blocks (64K) being grouped together.
254 * The single most important aspect of this is the inode grouping
255 * because that is what allows 'find' and 'ls' and other filesystem
256 * topology operations to run fast.
259 if (bref
->data_off
& ~HAMMER2_OFF_MASK_RADIX
)
260 bpref
= bref
->data_off
& ~HAMMER2_OFF_MASK_RADIX
;
261 else if (trans
->tmp_bpref
)
262 bpref
= trans
->tmp_bpref
;
263 else if (trans
->tmp_ip
)
264 bpref
= trans
->tmp_ip
->chain
->bref
.data_off
;
268 * Heuristic tracking index. We would like one for each distinct
269 * bref type if possible. heur_freemap[] has room for two classes
270 * for each type. At a minimum we have to break-up our heuristic
271 * by device block sizes.
273 hindex
= hammer2_devblkradix(radix
) - HAMMER2_MINIORADIX
;
274 KKASSERT(hindex
< HAMMER2_FREEMAP_HEUR_NRADIX
);
275 hindex
+= bref
->type
* HAMMER2_FREEMAP_HEUR_NRADIX
;
276 hindex
&= HAMMER2_FREEMAP_HEUR_TYPES
* HAMMER2_FREEMAP_HEUR_NRADIX
- 1;
277 KKASSERT(hindex
< HAMMER2_FREEMAP_HEUR_SIZE
);
279 iter
.bpref
= hmp
->heur_freemap
[hindex
];
282 * Make sure bpref is in-bounds. It's ok if bpref covers a zone's
283 * reserved area, the try code will iterate past it.
285 if (iter
.bpref
> hmp
->voldata
.volu_size
)
286 iter
.bpref
= hmp
->voldata
.volu_size
- 1;
289 * Iterate the freemap looking for free space before and after.
291 parent
= &hmp
->fchain
;
292 hammer2_chain_ref(parent
);
293 hammer2_chain_lock(parent
, HAMMER2_RESOLVE_ALWAYS
);
295 iter
.bnext
= iter
.bpref
;
298 while (error
== EAGAIN
) {
299 error
= hammer2_freemap_try_alloc(&parent
, bref
, radix
,
302 hmp
->heur_freemap
[hindex
] = iter
.bnext
;
303 hammer2_chain_unlock(parent
);
304 hammer2_chain_drop(parent
);
306 KKASSERT(error
== 0);
312 hammer2_freemap_try_alloc(hammer2_chain_t
**parentp
,
313 hammer2_blockref_t
*bref
, int radix
,
314 hammer2_fiterate_t
*iter
, hammer2_tid_t mtid
)
316 hammer2_dev_t
*hmp
= (*parentp
)->hmp
;
317 hammer2_off_t l0size
;
318 hammer2_off_t l1size
;
319 hammer2_off_t l1mask
;
320 hammer2_key_t key_dummy
;
321 hammer2_chain_t
*chain
;
328 * Calculate the number of bytes being allocated, the number
329 * of contiguous bits of bitmap being allocated, and the bitmap
332 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
335 bytes
= (size_t)1 << radix
;
336 class = (bref
->type
<< 8) | hammer2_devblkradix(radix
);
339 * Lookup the level1 freemap chain, creating and initializing one
340 * if necessary. Intermediate levels will be created automatically
341 * when necessary by hammer2_chain_create().
343 key
= H2FMBASE(iter
->bnext
, HAMMER2_FREEMAP_LEVEL1_RADIX
);
344 l0size
= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX
);
345 l1size
= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX
);
348 chain
= hammer2_chain_lookup(parentp
, &key_dummy
, key
, key
+ l1mask
,
350 HAMMER2_LOOKUP_ALWAYS
|
351 HAMMER2_LOOKUP_MATCHIND
);
352 error
= hammer2_error_to_errno(error
);
356 * Create the missing leaf, be sure to initialize
357 * the auxillary freemap tracking information in
358 * the bref.check.freemap structure.
361 kprintf("freemap create L1 @ %016jx bpref %016jx\n",
364 error
= hammer2_chain_create(parentp
, &chain
,
365 hmp
->spmp
, HAMMER2_METH_DEFAULT
,
366 key
, HAMMER2_FREEMAP_LEVEL1_RADIX
,
367 HAMMER2_BREF_TYPE_FREEMAP_LEAF
,
368 HAMMER2_FREEMAP_LEVELN_PSIZE
,
370 KKASSERT(error
== 0);
372 hammer2_chain_modify(chain
, mtid
, 0, 0);
373 bzero(&chain
->data
->bmdata
[0],
374 HAMMER2_FREEMAP_LEVELN_PSIZE
);
375 chain
->bref
.check
.freemap
.bigmask
= (uint32_t)-1;
376 chain
->bref
.check
.freemap
.avail
= l1size
;
377 /* bref.methods should already be inherited */
379 hammer2_freemap_init(hmp
, key
, chain
);
381 } else if (chain
->error
) {
383 * Error during lookup.
385 kprintf("hammer2_freemap_try_alloc: %016jx: error %s\n",
386 (intmax_t)bref
->data_off
,
387 hammer2_error_str(chain
->error
));
389 } else if ((chain
->bref
.check
.freemap
.bigmask
&
390 ((size_t)1 << radix
)) == 0) {
392 * Already flagged as not having enough space
397 * Modify existing chain to setup for adjustment.
399 hammer2_chain_modify(chain
, mtid
, 0, 0);
406 hammer2_bmap_data_t
*bmap
;
407 hammer2_key_t base_key
;
412 KKASSERT(chain
->bref
.type
== HAMMER2_BREF_TYPE_FREEMAP_LEAF
);
413 start
= (int)((iter
->bnext
- key
) >>
414 HAMMER2_FREEMAP_LEVEL0_RADIX
);
415 KKASSERT(start
>= 0 && start
< HAMMER2_FREEMAP_COUNT
);
416 hammer2_chain_modify(chain
, mtid
, 0, 0);
419 for (count
= 0; count
< HAMMER2_FREEMAP_COUNT
; ++count
) {
422 if (start
+ count
>= HAMMER2_FREEMAP_COUNT
&&
428 * Calculate bmap pointer
430 * NOTE: bmap pointer is invalid if n >= FREEMAP_COUNT.
433 bmap
= &chain
->data
->bmdata
[n
];
435 if (n
>= HAMMER2_FREEMAP_COUNT
) {
437 } else if (bmap
->avail
) {
439 } else if (radix
< HAMMER2_FREEMAP_BLOCK_RADIX
&&
440 (bmap
->linear
& HAMMER2_FREEMAP_BLOCK_MASK
)) {
447 (bmap
->class == 0 || bmap
->class == class)) {
448 base_key
= key
+ n
* l0size
;
449 error
= hammer2_bmap_alloc(hmp
, bmap
,
454 if (error
!= ENOSPC
) {
461 * Must recalculate after potentially having called
462 * hammer2_bmap_alloc() above in case chain was
465 * NOTE: bmap pointer is invalid if n < 0.
468 bmap
= &chain
->data
->bmdata
[n
];
471 } else if (bmap
->avail
) {
473 } else if (radix
< HAMMER2_FREEMAP_BLOCK_RADIX
&&
474 (bmap
->linear
& HAMMER2_FREEMAP_BLOCK_MASK
)) {
481 (bmap
->class == 0 || bmap
->class == class)) {
482 base_key
= key
+ n
* l0size
;
483 error
= hammer2_bmap_alloc(hmp
, bmap
,
488 if (error
!= ENOSPC
) {
494 if (error
== ENOSPC
) {
495 chain
->bref
.check
.freemap
.bigmask
&=
496 (uint32_t)~((size_t)1 << radix
);
498 /* XXX also scan down from original count */
503 * Assert validity. Must be beyond the static allocator used
504 * by newfs_hammer2 (and thus also beyond the aux area),
505 * not go past the volume size, and must not be in the
506 * reserved segment area for a zone.
508 KKASSERT(key
>= hmp
->voldata
.allocator_beg
&&
509 key
+ bytes
<= hmp
->voldata
.volu_size
);
510 KKASSERT((key
& HAMMER2_ZONE_MASK64
) >= HAMMER2_ZONE_SEG
);
511 bref
->data_off
= key
| radix
;
514 * Record dedupability. The dedup bits are cleared
515 * when bulkfree transitions the freemap from 11->10,
516 * and asserted to be clear on the 10->00 transition.
518 * We must record the bitmask with the chain locked
519 * at the time we set the allocation bits to avoid
522 if (bref
->type
== HAMMER2_BREF_TYPE_DATA
)
523 hammer2_io_dedup_set(hmp
, bref
);
525 kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
527 bref
->key
, bref
->data_off
, chain
->bref
.data_off
);
529 } else if (error
== ENOSPC
) {
531 * Return EAGAIN with next iteration in iter->bnext, or
532 * return ENOSPC if the allocation map has been exhausted.
534 error
= hammer2_freemap_iterate(parentp
, &chain
, iter
);
541 hammer2_chain_unlock(chain
);
542 hammer2_chain_drop(chain
);
/*
 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
 *
 * If the linear iterator is mid-block we use it directly (the bitmap should
 * already be marked allocated), otherwise we search for a block in the
 * bitmap that fits the allocation request.
 *
 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
 * to fully allocated and adjusts the linear allocator to allow the
 * remaining space to be allocated.
 *
 * sub_key is the lower 32 bits of the chain->bref.key for the chain whose
 * bref is being allocated.  If the radix represents an allocation >= 16KB
 * (aka HAMMER2_FREEMAP_BLOCK_RADIX) we try to use this key to select the
 * blocks directly out of the bmap.
 */
565 hammer2_bmap_alloc(hammer2_dev_t
*hmp
, hammer2_bmap_data_t
*bmap
,
566 uint16_t class, int n
, int sub_key
,
567 int radix
, hammer2_key_t
*basep
)
572 hammer2_bitmap_t bmmask
;
578 * Take into account 2-bits per block when calculating bmradix.
580 size
= (size_t)1 << radix
;
582 if (radix
<= HAMMER2_FREEMAP_BLOCK_RADIX
) {
584 /* (16K) 2 bits per allocation block */
586 bmradix
= (hammer2_bitmap_t
)2 <<
587 (radix
- HAMMER2_FREEMAP_BLOCK_RADIX
);
588 /* (32K-256K) 4, 8, 16, 32 bits per allocation block */
592 * Use the linear iterator to pack small allocations, otherwise
593 * fall-back to finding a free 16KB chunk. The linear iterator
594 * is only valid when *NOT* on a freemap chunking boundary (16KB).
595 * If it is the bitmap must be scanned. It can become invalid
596 * once we pack to the boundary. We adjust it after a bitmap
597 * allocation only for sub-16KB allocations (so the perfectly good
598 * previous value can still be used for fragments when 16KB+
599 * allocations are made inbetween fragmentary allocations).
601 * Beware of hardware artifacts when bmradix == 64 (intermediate
602 * result can wind up being '1' instead of '0' if hardware masks
605 * NOTE: j needs to be even in the j= calculation. As an artifact
606 * of the /2 division, our bitmask has to clear bit 0.
608 * NOTE: TODO this can leave little unallocatable fragments lying
611 if (((uint32_t)bmap
->linear
& HAMMER2_FREEMAP_BLOCK_MASK
) + size
<=
612 HAMMER2_FREEMAP_BLOCK_SIZE
&&
613 (bmap
->linear
& HAMMER2_FREEMAP_BLOCK_MASK
) &&
614 bmap
->linear
< HAMMER2_SEGSIZE
) {
616 * Use linear iterator if it is not block-aligned to avoid
619 KKASSERT(bmap
->linear
>= 0 &&
620 bmap
->linear
+ size
<= HAMMER2_SEGSIZE
&&
621 (bmap
->linear
& (HAMMER2_ALLOC_MIN
- 1)) == 0);
622 offset
= bmap
->linear
;
623 i
= offset
/ (HAMMER2_SEGSIZE
/ 8);
624 j
= (offset
/ (HAMMER2_FREEMAP_BLOCK_SIZE
/ 2)) & 30;
625 bmmask
= (bmradix
== HAMMER2_BMAP_BITS_PER_ELEMENT
) ?
626 HAMMER2_BMAP_ALLONES
:
627 ((hammer2_bitmap_t
)1 << bmradix
) - 1;
629 bmap
->linear
= offset
+ size
;
632 * Try to index a starting point based on sub_key. This
633 * attempts to restore sequential block ordering on-disk
634 * whenever possible, even if data is committed out of
637 * i - Index bitmapq[], full data range represented is
640 * j - Index within bitmapq[i], full data range represented is
641 * HAMMER2_BMAP_INDEX_SIZE.
649 case HAMMER2_BREF_TYPE_DATA
:
650 if (radix
>= HAMMER2_FREEMAP_BLOCK_RADIX
) {
651 i
= (sub_key
& HAMMER2_BMAP_MASK
) /
652 (HAMMER2_BMAP_SIZE
/ HAMMER2_BMAP_ELEMENTS
);
653 j
= (sub_key
& HAMMER2_BMAP_INDEX_MASK
) /
654 (HAMMER2_BMAP_INDEX_SIZE
/
655 HAMMER2_BMAP_BLOCKS_PER_ELEMENT
);
659 case HAMMER2_BREF_TYPE_INODE
:
665 KKASSERT(i
< HAMMER2_BMAP_ELEMENTS
&&
666 j
< 2 * HAMMER2_BMAP_BLOCKS_PER_ELEMENT
);
667 KKASSERT(j
+ bmradix
<= HAMMER2_BMAP_BITS_PER_ELEMENT
);
668 bmmask
= (bmradix
== HAMMER2_BMAP_BITS_PER_ELEMENT
) ?
669 HAMMER2_BMAP_ALLONES
:
670 ((hammer2_bitmap_t
)1 << bmradix
) - 1;
673 if ((bmap
->bitmapq
[i
] & bmmask
) == 0)
678 * General element scan.
680 * WARNING: (j) is iterating a bit index (by 2's)
682 for (i
= 0; i
< HAMMER2_BMAP_ELEMENTS
; ++i
) {
683 bmmask
= (bmradix
== HAMMER2_BMAP_BITS_PER_ELEMENT
) ?
684 HAMMER2_BMAP_ALLONES
:
685 ((hammer2_bitmap_t
)1 << bmradix
) - 1;
687 j
< HAMMER2_BMAP_BITS_PER_ELEMENT
;
689 if ((bmap
->bitmapq
[i
] & bmmask
) == 0)
694 /*fragments might remain*/
695 /*KKASSERT(bmap->avail == 0);*/
698 offset
= i
* (HAMMER2_SEGSIZE
/ HAMMER2_BMAP_ELEMENTS
) +
699 (j
* (HAMMER2_FREEMAP_BLOCK_SIZE
/ 2));
700 if (size
& HAMMER2_FREEMAP_BLOCK_MASK
)
701 bmap
->linear
= offset
+ size
;
704 /* 8 x (64/2) -> 256 x 16K -> 4MB */
705 KKASSERT(i
>= 0 && i
< HAMMER2_BMAP_ELEMENTS
);
708 * Optimize the buffer cache to avoid unnecessary read-before-write
711 * The device block size could be larger than the allocation size
712 * so the actual bitmap test is somewhat more involved. We have
713 * to use a compatible buffer size for this operation.
715 if ((bmap
->bitmapq
[i
] & bmmask
) == 0 &&
716 hammer2_devblksize(size
) != size
) {
717 size_t psize
= hammer2_devblksize(size
);
718 hammer2_off_t pmask
= (hammer2_off_t
)psize
- 1;
719 int pbmradix
= (hammer2_bitmap_t
)2 <<
720 (hammer2_devblkradix(radix
) -
721 HAMMER2_FREEMAP_BLOCK_RADIX
);
722 hammer2_bitmap_t pbmmask
;
723 int pradix
= hammer2_getradix(psize
);
725 pbmmask
= (pbmradix
== HAMMER2_BMAP_BITS_PER_ELEMENT
) ?
726 HAMMER2_BMAP_ALLONES
:
727 ((hammer2_bitmap_t
)1 << pbmradix
) - 1;
728 while ((pbmmask
& bmmask
) == 0)
729 pbmmask
<<= pbmradix
;
732 kprintf("%016jx mask %016jx %016jx %016jx (%zd/%zd)\n",
733 *basep
+ offset
, bmap
->bitmapq
[i
],
734 pbmmask
, bmmask
, size
, psize
);
737 if ((bmap
->bitmapq
[i
] & pbmmask
) == 0) {
740 hammer2_io_newnz(hmp
, class >> 8,
741 (*basep
+ (offset
& ~pmask
)) |
742 pradix
, psize
, &dio
);
743 hammer2_io_putblk(&dio
);
749 * When initializing a new inode segment also attempt to initialize
750 * an adjacent segment. Be careful not to index beyond the array
753 * We do this to try to localize inode accesses to improve
754 * directory scan rates. XXX doesn't improve scan rates.
756 if (size
== HAMMER2_INODE_BYTES
) {
758 if (bmap
[-1].radix
== 0 && bmap
[-1].avail
)
759 bmap
[-1].radix
= radix
;
761 if (bmap
[1].radix
== 0 && bmap
[1].avail
)
762 bmap
[1].radix
= radix
;
767 * Calculate the bitmap-granular change in bgsize for the volume
768 * header. We cannot use the fine-grained change here because
769 * the bulkfree code can't undo it. If the bitmap element is already
770 * marked allocated it has already been accounted for.
772 if (radix
< HAMMER2_FREEMAP_BLOCK_RADIX
) {
773 if (bmap
->bitmapq
[i
] & bmmask
)
776 bgsize
= HAMMER2_FREEMAP_BLOCK_SIZE
;
782 * Adjust the bitmap, set the class (it might have been 0),
783 * and available bytes, update the allocation offset (*basep)
784 * from the L0 base to the actual offset.
786 * avail must reflect the bitmap-granular availability. The allocator
787 * tests will also check the linear iterator.
789 bmap
->bitmapq
[i
] |= bmmask
;
791 bmap
->avail
-= bgsize
;
795 * Adjust the volume header's allocator_free parameter. This
796 * parameter has to be fixed up by bulkfree which has no way to
797 * figure out sub-16K chunking, so it must be adjusted by the
798 * bitmap-granular size.
801 hammer2_voldata_lock(hmp
);
802 hammer2_voldata_modify(hmp
);
803 hmp
->voldata
.allocator_free
-= bgsize
;
804 hammer2_voldata_unlock(hmp
);
812 hammer2_freemap_init(hammer2_dev_t
*hmp
, hammer2_key_t key
,
813 hammer2_chain_t
*chain
)
815 hammer2_off_t l1size
;
818 hammer2_bmap_data_t
*bmap
;
821 l1size
= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX
);
824 * Calculate the portion of the 2GB map that should be initialized
825 * as free. Portions below or after will be initialized as allocated.
826 * SEGMASK-align the areas so we don't have to worry about sub-scans
827 * or endianess when using memset.
829 * (1) Ensure that all statically allocated space from newfs_hammer2
830 * is marked allocated.
832 * (2) Ensure that the reserved area is marked allocated (typically
833 * the first 4MB of the 2GB area being represented).
835 * (3) Ensure that any trailing space at the end-of-volume is marked
838 * WARNING! It is possible for lokey to be larger than hikey if the
839 * entire 2GB segment is within the static allocation.
841 lokey
= (hmp
->voldata
.allocator_beg
+ HAMMER2_SEGMASK64
) &
844 if (lokey
< H2FMBASE(key
, HAMMER2_FREEMAP_LEVEL1_RADIX
) +
845 HAMMER2_ZONE_SEG64
) {
846 lokey
= H2FMBASE(key
, HAMMER2_FREEMAP_LEVEL1_RADIX
) +
850 hikey
= key
+ H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX
);
851 if (hikey
> hmp
->voldata
.volu_size
) {
852 hikey
= hmp
->voldata
.volu_size
& ~HAMMER2_SEGMASK64
;
855 chain
->bref
.check
.freemap
.avail
=
856 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX
);
857 bmap
= &chain
->data
->bmdata
[0];
859 for (count
= 0; count
< HAMMER2_FREEMAP_COUNT
; ++count
) {
860 if (key
< lokey
|| key
>= hikey
) {
861 memset(bmap
->bitmapq
, -1,
862 sizeof(bmap
->bitmapq
));
864 bmap
->linear
= HAMMER2_SEGSIZE
;
865 chain
->bref
.check
.freemap
.avail
-=
866 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX
);
868 bmap
->avail
= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX
);
870 key
+= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX
);
/*
 * The current Level 1 freemap has been exhausted, iterate to the next
 * one, return ENOSPC if no freemaps remain.
 *
 * XXX this should rotate back to the beginning to handle freed-up space
 * XXX or use intermediate entries to locate free space.  TODO
 */
883 hammer2_freemap_iterate(hammer2_chain_t
**parentp
, hammer2_chain_t
**chainp
,
884 hammer2_fiterate_t
*iter
)
886 hammer2_dev_t
*hmp
= (*parentp
)->hmp
;
888 iter
->bnext
&= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX
) - 1);
889 iter
->bnext
+= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX
);
890 if (iter
->bnext
>= hmp
->voldata
.volu_size
) {
892 if (++iter
->loops
== 2)
/*
 * Adjust the bit-pattern for data in the freemap bitmap according to
 * (how).  This code is called from on-mount recovery to fixup (mark
 * as allocated) blocks whose freemap updates might not have been committed
 * in the last crash and is used by the bulk freemap scan to stage frees.
 *
 * WARNING! Cannot be called with an empty-data bref (radix == 0).
 *
 * XXX currently disabled when how == 0 (the normal real-time case).  At
 * the moment we depend on the bulk freescan to actually free blocks.  It
 * will still call this routine with a non-zero how to stage possible frees
 * and to do the actual free.
 */
912 hammer2_freemap_adjust(hammer2_dev_t
*hmp
, hammer2_blockref_t
*bref
,
915 hammer2_off_t data_off
= bref
->data_off
;
916 hammer2_chain_t
*chain
;
917 hammer2_chain_t
*parent
;
918 hammer2_bmap_data_t
*bmap
;
920 hammer2_key_t key_dummy
;
921 hammer2_off_t l0size
;
922 hammer2_off_t l1size
;
923 hammer2_off_t l1mask
;
925 hammer2_bitmap_t
*bitmap
;
926 const hammer2_bitmap_t bmmask00
= 0;
927 hammer2_bitmap_t bmmask01
;
928 hammer2_bitmap_t bmmask10
;
929 hammer2_bitmap_t bmmask11
;
939 KKASSERT(how
== HAMMER2_FREEMAP_DORECOVER
);
941 mtid
= hammer2_trans_sub(hmp
->spmp
);
943 radix
= (int)data_off
& HAMMER2_OFF_MASK_RADIX
;
944 KKASSERT(radix
!= 0);
945 data_off
&= ~HAMMER2_OFF_MASK_RADIX
;
946 KKASSERT(radix
<= HAMMER2_RADIX_MAX
);
949 bytes
= (size_t)1 << radix
;
952 class = (bref
->type
<< 8) | hammer2_devblkradix(radix
);
955 * We can't adjust the freemap for data allocations made by
958 if (data_off
< hmp
->voldata
.allocator_beg
)
961 KKASSERT((data_off
& HAMMER2_ZONE_MASK64
) >= HAMMER2_ZONE_SEG
);
964 * Lookup the level1 freemap chain. The chain must exist.
966 key
= H2FMBASE(data_off
, HAMMER2_FREEMAP_LEVEL1_RADIX
);
967 l0size
= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX
);
968 l1size
= H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX
);
971 parent
= &hmp
->fchain
;
972 hammer2_chain_ref(parent
);
973 hammer2_chain_lock(parent
, HAMMER2_RESOLVE_ALWAYS
);
975 chain
= hammer2_chain_lookup(&parent
, &key_dummy
, key
, key
+ l1mask
,
977 HAMMER2_LOOKUP_ALWAYS
|
978 HAMMER2_LOOKUP_MATCHIND
);
979 error
= hammer2_error_to_errno(error
);
982 * Stop early if we are trying to free something but no leaf exists.
984 if (chain
== NULL
&& how
!= HAMMER2_FREEMAP_DORECOVER
) {
985 kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
986 (intmax_t)bref
->data_off
);
990 kprintf("hammer2_freemap_adjust: %016jx: error %s\n",
991 (intmax_t)bref
->data_off
,
992 hammer2_error_str(chain
->error
));
993 hammer2_chain_unlock(chain
);
994 hammer2_chain_drop(chain
);
1000 * Create any missing leaf(s) if we are doing a recovery (marking
1001 * the block(s) as being allocated instead of being freed). Be sure
1002 * to initialize the auxillary freemap tracking info in the
1003 * bref.check.freemap structure.
1005 if (chain
== NULL
&& how
== HAMMER2_FREEMAP_DORECOVER
) {
1006 error
= hammer2_chain_create(&parent
, &chain
,
1007 hmp
->spmp
, HAMMER2_METH_DEFAULT
,
1008 key
, HAMMER2_FREEMAP_LEVEL1_RADIX
,
1009 HAMMER2_BREF_TYPE_FREEMAP_LEAF
,
1010 HAMMER2_FREEMAP_LEVELN_PSIZE
,
1013 if (hammer2_debug
& 0x0040) {
1014 kprintf("fixup create chain %p %016jx:%d\n",
1015 chain
, chain
->bref
.key
, chain
->bref
.keybits
);
1019 hammer2_chain_modify(chain
, mtid
, 0, 0);
1020 bzero(&chain
->data
->bmdata
[0],
1021 HAMMER2_FREEMAP_LEVELN_PSIZE
);
1022 chain
->bref
.check
.freemap
.bigmask
= (uint32_t)-1;
1023 chain
->bref
.check
.freemap
.avail
= l1size
;
1024 /* bref.methods should already be inherited */
1026 hammer2_freemap_init(hmp
, key
, chain
);
1028 /* XXX handle error */
1032 kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n",
1033 chain
->bref
.type
, chain
->bref
.key
,
1034 chain
->bref
.keybits
, chain
->bref
.data_off
);
1038 * Calculate the bitmask (runs in 2-bit pairs).
1040 start
= ((int)(data_off
>> HAMMER2_FREEMAP_BLOCK_RADIX
) & 15) * 2;
1041 bmmask01
= (hammer2_bitmap_t
)1 << start
;
1042 bmmask10
= (hammer2_bitmap_t
)2 << start
;
1043 bmmask11
= (hammer2_bitmap_t
)3 << start
;
1046 * Fixup the bitmap. Partial blocks cannot be fully freed unless
1047 * a bulk scan is able to roll them up.
1049 if (radix
< HAMMER2_FREEMAP_BLOCK_RADIX
) {
1051 if (how
== HAMMER2_FREEMAP_DOREALFREE
)
1052 how
= HAMMER2_FREEMAP_DOMAYFREE
;
1054 count
= 1 << (radix
- HAMMER2_FREEMAP_BLOCK_RADIX
);
1058 * [re]load the bmap and bitmap pointers. Each bmap entry covers
1059 * a 4MB swath. The bmap itself (LEVEL1) covers 2GB.
1061 * Be sure to reset the linear iterator to ensure that the adjustment
1065 bmap
= &chain
->data
->bmdata
[(int)(data_off
>> HAMMER2_SEGRADIX
) &
1066 (HAMMER2_FREEMAP_COUNT
- 1)];
1067 bitmap
= &bmap
->bitmapq
[(int)(data_off
>> (HAMMER2_SEGRADIX
- 3)) & 7];
1074 if (how
== HAMMER2_FREEMAP_DORECOVER
) {
1076 * Recovery request, mark as allocated.
1078 if ((*bitmap
& bmmask11
) != bmmask11
) {
1079 if (modified
== 0) {
1080 hammer2_chain_modify(chain
, mtid
, 0, 0);
1084 if ((*bitmap
& bmmask11
) == bmmask00
) {
1086 HAMMER2_FREEMAP_BLOCK_SIZE
;
1087 bgsize
+= HAMMER2_FREEMAP_BLOCK_SIZE
;
1089 if (bmap
->class == 0)
1090 bmap
->class = class;
1091 *bitmap
|= bmmask11
;
1092 if (hammer2_debug
& 0x0040) {
1093 kprintf("hammer2_freemap_recover: "
1095 "block=%016jx/%zd\n",
1096 bref
->type
, data_off
, bytes
);
1100 kprintf("hammer2_freemap_recover: good "
1101 "type=%02x block=%016jx/%zd\n",
1102 bref->type, data_off, bytes);
1108 * XXX this stuff doesn't work, avail is miscalculated and
1109 * code 10 means something else now.
1111 else if ((*bitmap
& bmmask11
) == bmmask11
) {
1113 * Mayfree/Realfree request and bitmap is currently
1114 * marked as being fully allocated.
1117 hammer2_chain_modify(chain
, 0);
1121 if (how
== HAMMER2_FREEMAP_DOREALFREE
)
1122 *bitmap
&= ~bmmask11
;
1124 *bitmap
= (*bitmap
& ~bmmask11
) | bmmask10
;
1125 } else if ((*bitmap
& bmmask11
) == bmmask10
) {
1127 * Mayfree/Realfree request and bitmap is currently
1128 * marked as being possibly freeable.
1130 if (how
== HAMMER2_FREEMAP_DOREALFREE
) {
1132 hammer2_chain_modify(chain
, 0);
1136 *bitmap
&= ~bmmask11
;
1140 * 01 - Not implemented, currently illegal state
1141 * 00 - Not allocated at all, illegal free.
1143 panic("hammer2_freemap_adjust: "
1144 "Illegal state %08x(%08x)",
1145 *bitmap
, *bitmap
& bmmask11
);
1153 #if HAMMER2_BMAP_ELEMENTS != 8
1154 #error "hammer2_freemap.c: HAMMER2_BMAP_ELEMENTS expected to be 8"
1156 if (how
== HAMMER2_FREEMAP_DOREALFREE
&& modified
) {
1157 bmap
->avail
+= 1 << radix
;
1158 KKASSERT(bmap
->avail
<= HAMMER2_SEGSIZE
);
1159 if (bmap
->avail
== HAMMER2_SEGSIZE
&&
1160 bmap
->bitmapq
[0] == 0 &&
1161 bmap
->bitmapq
[1] == 0 &&
1162 bmap
->bitmapq
[2] == 0 &&
1163 bmap
->bitmapq
[3] == 0 &&
1164 bmap
->bitmapq
[4] == 0 &&
1165 bmap
->bitmapq
[5] == 0 &&
1166 bmap
->bitmapq
[6] == 0 &&
1167 bmap
->bitmapq
[7] == 0) {
1168 key
= H2FMBASE(data_off
, HAMMER2_FREEMAP_LEVEL0_RADIX
);
1169 kprintf("Freeseg %016jx\n", (intmax_t)key
);
1175 * chain->bref.check.freemap.bigmask (XXX)
1177 * Setting bigmask is a hint to the allocation code that there might
1178 * be something allocatable. We also set this in recovery... it
1179 * doesn't hurt and we might want to use the hint for other validation
1180 * operations later on.
1183 chain
->bref
.check
.freemap
.bigmask
|= 1 << radix
;
1185 hammer2_chain_unlock(chain
);
1186 hammer2_chain_drop(chain
);
1188 hammer2_chain_unlock(parent
);
1189 hammer2_chain_drop(parent
);
1192 hammer2_voldata_lock(hmp
);
1193 hammer2_voldata_modify(hmp
);
1194 hmp
->voldata
.allocator_free
-= bgsize
;
1195 hammer2_voldata_unlock(hmp
);
1200 * Validate the freemap, in three stages.
1202 * stage-1 ALLOCATED -> POSSIBLY FREE
1203 * POSSIBLY FREE -> POSSIBLY FREE (type corrected)
1205 * This transitions bitmap entries from ALLOCATED to POSSIBLY FREE.
1206 * The POSSIBLY FREE state does not mean that a block is actually free
1207 * and may be transitioned back to ALLOCATED in stage-2.
1209 * This is typically done during normal filesystem operations when
1210 * something is deleted or a block is replaced.
1212 * This is done by bulkfree in-bulk after a memory-bounded meta-data
1213 * scan to try to determine what might be freeable.
1215 * This can be done unconditionally through a freemap scan when the
1216 * intention is to brute-force recover the proper state of the freemap.
1218 * stage-2 POSSIBLY FREE -> ALLOCATED (scan metadata topology)
1220 * This is done by bulkfree during a meta-data scan to ensure that
1221 * all blocks still actually allocated by the filesystem are marked
1224 * NOTE! Live filesystem transitions to POSSIBLY FREE can occur while
1225 * the bulkfree stage-2 and stage-3 is running. The live filesystem
1226 * will use the alternative POSSIBLY FREE type (2) to prevent
1227 * stage-3 from improperly transitioning unvetted possibly-free
1230 * stage-3 POSSIBLY FREE (type 1) -> FREE (scan freemap)
1232 * This is done by bulkfree to finalize POSSIBLY FREE states.