/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>

#include "hammer2.h"

#define FREEMAP_DEBUG	0
struct hammer2_fiterate {
	hammer2_off_t	bpref;
	hammer2_off_t	bnext;
	int		loops;
};

typedef struct hammer2_fiterate hammer2_fiterate_t;
static int hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			hammer2_blockref_t *bref, int radix,
			hammer2_fiterate_t *iter, hammer2_tid_t mtid);
static void hammer2_freemap_init(hammer2_dev_t *hmp,
			hammer2_key_t key, hammer2_chain_t *chain);
static int hammer2_bmap_alloc(hammer2_dev_t *hmp,
			hammer2_bmap_data_t *bmap, uint16_t class,
			int n, int radix, hammer2_key_t *basep);
static int hammer2_freemap_iterate(hammer2_chain_t **parentp,
			hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter);
int
hammer2_freemapradix(int radix)
{
	return radix;
}
/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 2GB zone.
 *
 * Rotate between four possibilities.  Theoretically this means we have three
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount time.
 */
#define H2FMBASE(key, radix)	((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
#define H2FMSHIFT(radix)	((hammer2_off_t)1 << (radix))
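
/*
 * Note (added for clarity, not in the original source): for a radix r,
 * H2FMSHIFT(r) is the region size 2^r and H2FMBASE(key, r) rounds key down
 * to a 2^r boundary.  For example, H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX)
 * yields the base of the 2GB zone containing key, which is the unit the
 * freemap leaves below operate on.
 */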
static int
hammer2_freemap_reserve(hammer2_chain_t *chain, int radix)
{
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_off_t off;
	int index;
	int index_inc;
	size_t bytes;

	/*
	 * Physical allocation size.
	 */
	bytes = (size_t)1 << radix;
	/*
	 * Calculate block selection index 0..7 of current block.  If this
	 * is the first allocation of the block (versus a modification of an
	 * existing block), we use index 0, otherwise we use the next rotating
	 * index.
	 */
	if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
		index = 0;
	} else {
		off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
		      (((hammer2_off_t)1 <<
			HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
		off = off / HAMMER2_PBUFSIZE;
		KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
			 off < HAMMER2_ZONE_FREEMAP_END);
		index = (int)(off - HAMMER2_ZONE_FREEMAP_00) /
			HAMMER2_ZONE_FREEMAP_INC;
		KKASSERT(index >= 0 && index < HAMMER2_NFREEMAPS);
		if (++index == HAMMER2_NFREEMAPS)
			index = 0;
	}
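
	/*
	 * Illustrative note (added; not in the original source): with the
	 * four freemap copies mentioned above, a freemap block that is
	 * repeatedly modified rotates through reserved copies
	 * 0 -> 1 -> 2 -> 3 -> 0, so the previously written copies stay
	 * intact and can seed the fixup scan after a crash.
	 */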
	/*
	 * Calculate the block offset of the reserved block.  This will
	 * point into the 4MB reserved area at the base of the appropriate
	 * 2GB zone, once added to the FREEMAP_x selection above.
	 */
	index_inc = index * HAMMER2_ZONE_FREEMAP_INC;

	switch(bref->keybits) {
	/* case HAMMER2_FREEMAP_LEVEL6_RADIX: not applicable */
	case HAMMER2_FREEMAP_LEVEL5_RADIX:	/* 2EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL5_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL5) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL4_RADIX:	/* 2EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL3_RADIX:	/* 2PB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL2_RADIX:	/* 2TB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL1_RADIX:	/* 2GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
		break;
	default:
		panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
		/* NOT REACHED */
		off = (hammer2_off_t)-1;
	}
	bref->data_off = off | radix;
#if FREEMAP_DEBUG
	kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		bref->type, bref->key, bref->keybits, bref->data_off);
#endif
	return 0;
}
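
/*
 * Note (added for clarity, not in the original source): data_off packs the
 * media byte offset and the size radix into a single 64-bit field.  The low
 * bits covered by HAMMER2_OFF_MASK_RADIX hold the radix and the remaining
 * high bits hold the device offset, which is why callers mask with
 * ~HAMMER2_OFF_MASK_RADIX to recover the raw offset.
 */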
/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.  Create missing
 * freemap infrastructure on-the-fly as needed (including marking initial
 * allocations using the iterator as allocated, instantiating new 2GB zones,
 * and dealing with the end-of-media edge case).
 *
 * ip and bpref are only used as a heuristic to determine locality of
 * reference.  bref->key may also be used heuristically.
 *
 * This function is a NOP if bytes is 0.
 */
int
hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes)
{
	hammer2_dev_t *hmp = chain->hmp;
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_chain_t *parent;
	hammer2_tid_t mtid;
	int radix;
	int error;
	int hindex;
	hammer2_fiterate_t iter;
	/*
	 * If allocating or downsizing to zero we just get rid of whatever
	 * data_off we had.
	 */
	if (bytes == 0) {
		chain->bref.data_off = 0;
		return 0;
	}

	mtid = hammer2_trans_sub(hmp->spmp);
	/*
	 * Validate the allocation size.  It must be a power of 2.
	 *
	 * For now require that the caller be aware of the minimum
	 * allocation (1K).
	 */
	radix = hammer2_getradix(bytes);
	KKASSERT((size_t)1 << radix == bytes);
	if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		/*
		 * Freemap blocks themselves are assigned from the reserve
		 * area, not allocated from the freemap.
		 */
		error = hammer2_freemap_reserve(chain, radix);
		KKASSERT(error == 0);
		return error;
	}

	KKASSERT(bytes >= HAMMER2_ALLOC_MIN && bytes <= HAMMER2_ALLOC_MAX);
	/*
	 * Calculate the starting point for our allocation search.
	 *
	 * Each freemap leaf is dedicated to a specific freemap_radix.
	 * The freemap_radix can be more fine-grained than the device buffer
	 * radix which results in inodes being grouped together in their
	 * own segment, terminal-data (16K or less) and initial indirect
	 * block being grouped together, and then full-indirect and full-data
	 * blocks (64K) being grouped together.
	 *
	 * The single most important aspect of this is the inode grouping
	 * because that is what allows 'find' and 'ls' and other filesystem
	 * topology operations to run fast.
	 */
#if 0
	if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
		bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
	else if (trans->tmp_bpref)
		bpref = trans->tmp_bpref;
	else if (trans->tmp_ip)
		bpref = trans->tmp_ip->chain->bref.data_off;
#endif
	/*
	 * Heuristic tracking index.  We would like one for each distinct
	 * bref type if possible.  heur_freemap[] has room for two classes
	 * for each type.  At a minimum we have to break-up our heuristic
	 * by device block sizes.
	 */
	hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
	hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
	hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_SIZE);
	iter.bpref = hmp->heur_freemap[hindex];

	/*
	 * Make sure bpref is in-bounds.  It's ok if bpref covers a zone's
	 * reserved area, the try code will iterate past it.
	 */
	if (iter.bpref > hmp->voldata.volu_size)
		iter.bpref = hmp->voldata.volu_size - 1;
	/*
	 * Iterate the freemap looking for free space before and after.
	 */
	parent = &hmp->fchain;
	hammer2_chain_ref(parent);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	error = EAGAIN;
	iter.bnext = iter.bpref;
	iter.loops = 0;

	while (error == EAGAIN) {
		error = hammer2_freemap_try_alloc(&parent, bref, radix,
						  &iter, mtid);
	}
	hmp->heur_freemap[hindex] = iter.bnext;
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);

	KKASSERT(error == 0);

	return error;
}
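
/*
 * Note (added for clarity, not in the original source): the EAGAIN loop in
 * hammer2_freemap_alloc() above keeps calling hammer2_freemap_try_alloc()
 * while the iterator advances iter.bnext from zone to zone; on success the
 * final bnext is written back to hmp->heur_freemap[hindex] so the next
 * allocation of the same (type, device-block-radix) class resumes near
 * where this one left off.
 */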
static int
hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			  hammer2_blockref_t *bref, int radix,
			  hammer2_fiterate_t *iter, hammer2_tid_t mtid)
{
	hammer2_dev_t *hmp = (*parentp)->hmp;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_key_t key_dummy;
	hammer2_chain_t *chain;
	hammer2_off_t key;
	size_t bytes;
	uint16_t class;
	int error = 0;
	int cache_index = -1;
	/*
	 * Calculate the number of bytes being allocated, the number
	 * of contiguous bits of bitmap being allocated, and the bitmap
	 * mask.
	 *
	 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
	 *	    mask calculation.
	 */
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | hammer2_devblkradix(radix);
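
	/*
	 * Note (added for clarity, not in the original source): 'class'
	 * packs the bref type into the high byte and the device block radix
	 * into the low byte.  A bmap segment with a zero class is still
	 * unassigned; otherwise only allocations of the same class may share
	 * it (see the bmap->class checks in the scan loop below).
	 */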
	/*
	 * Lookup the level1 freemap chain, creating and initializing one
	 * if necessary.  Intermediate levels will be created automatically
	 * when necessary by hammer2_chain_create().
	 */
	key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1mask = l1size - 1;

	chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
				     &cache_index,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);
	if (chain == NULL) {
		/*
		 * Create the missing leaf, be sure to initialize
		 * the auxiliary freemap tracking information in
		 * the bref.check.freemap structure.
		 */
#if FREEMAP_DEBUG
		kprintf("freemap create L1 @ %016jx bpref %016jx\n",
			key, iter->bpref);
#endif
		error = hammer2_chain_create(parentp, &chain,
				     hmp->spmp, HAMMER2_METH_DEFAULT,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE,
				     mtid, 0, 0);
		KKASSERT(error == 0);

		hammer2_chain_modify(chain, mtid, 0, 0);
		bzero(&chain->data->bmdata[0],
		      HAMMER2_FREEMAP_LEVELN_PSIZE);
		chain->bref.check.freemap.bigmask = (uint32_t)-1;
		chain->bref.check.freemap.avail = l1size;
		/* bref.methods should already be inherited */

		hammer2_freemap_init(hmp, key, chain);
	} else if (chain->error) {
		/*
		 * Error during lookup.
		 */
		kprintf("hammer2_freemap_try_alloc: %016jx: error %s\n",
			(intmax_t)bref->data_off,
			hammer2_error_str(chain->error));
		error = EIO;
	} else if ((chain->bref.check.freemap.bigmask &
		   ((size_t)1 << radix)) == 0) {
		/*
		 * Already flagged as not having enough space
		 */
		error = ENOSPC;
	} else {
		/*
		 * Modify existing chain to setup for adjustment.
		 */
		hammer2_chain_modify(chain, mtid, 0, 0);
	}
	if (error == 0) {
		hammer2_bmap_data_t *bmap;
		hammer2_key_t base_key;
		int count;
		int start;
		int n;

		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		start = (int)((iter->bnext - key) >>
			      HAMMER2_FREEMAP_LEVEL0_RADIX);
		KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
		hammer2_chain_modify(chain, mtid, 0, 0);
		for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
			int availchk;

			if (start + count >= HAMMER2_FREEMAP_COUNT &&
			    start - count < 0) {
				error = ENOSPC;
				break;
			}

			/*
			 * Calculate bmap pointer
			 *
			 * NOTE: bmap pointer is invalid if n >= FREEMAP_COUNT.
			 */
			n = start + count;
			bmap = &chain->data->bmdata[n];

			if (n >= HAMMER2_FREEMAP_COUNT) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
				   (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				availchk = 1;
			} else {
				availchk = 0;
			}

			if (availchk &&
			    (bmap->class == 0 || bmap->class == class)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap, class,
							   n, radix, &base_key);
				if (error != ENOSPC) {
					key = base_key;
					break;
				}
			}

			/*
			 * Must recalculate after potentially having called
			 * hammer2_bmap_alloc() above in case chain was
			 * modified.
			 *
			 * NOTE: bmap pointer is invalid if n < 0.
			 */
			n = start - count;
			bmap = &chain->data->bmdata[n];
			if (n < 0) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
				   (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				availchk = 1;
			} else {
				availchk = 0;
			}

			if (availchk &&
			    (bmap->class == 0 || bmap->class == class)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap, class,
							   n, radix, &base_key);
				if (error != ENOSPC) {
					key = base_key;
					break;
				}
			}
		}
		if (error == ENOSPC) {
			chain->bref.check.freemap.bigmask &=
				(uint32_t)~((size_t)1 << radix);
		}
		/* XXX also scan down from original count */
	}

	if (error == 0) {
		/*
		 * Assert validity.  Must be beyond the static allocator used
		 * by newfs_hammer2 (and thus also beyond the aux area),
		 * not go past the volume size, and must not be in the
		 * reserved segment area for a zone.
		 */
		KKASSERT(key >= hmp->voldata.allocator_beg &&
			 key + bytes <= hmp->voldata.volu_size);
		KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
		bref->data_off = key | radix;
#if FREEMAP_DEBUG
		kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
			chain,
			bref->key, bref->data_off, chain->bref.data_off);
#endif
	} else if (error == ENOSPC) {
		/*
		 * Return EAGAIN with next iteration in iter->bnext, or
		 * return ENOSPC if the allocation map has been exhausted.
		 */
		error = hammer2_freemap_iterate(parentp, &chain, iter);
	}

	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	return error;
}
/*
 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
 *
 * If the linear iterator is mid-block we use it directly (the bitmap should
 * already be marked allocated), otherwise we search for a block in the bitmap
 * that fits the allocation request.
 *
 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
 * to fully allocated and adjusts the linear allocator to allow the
 * remaining space to be allocated.
 */
static int
hammer2_bmap_alloc(hammer2_dev_t *hmp, hammer2_bmap_data_t *bmap,
		   uint16_t class, int n, int radix, hammer2_key_t *basep)
{
	size_t size;
	size_t bgsize = 0;
	hammer2_bitmap_t bmradix;
	hammer2_bitmap_t bmmask;
	int offset;
	int i;
	int j;
	/*
	 * Take into account 2-bits per block when calculating bmradix.
	 */
	size = (size_t)1 << radix;

	if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
		bmradix = 2;
		/* (16K) 2 bits per allocation block */
	} else {
		bmradix = (hammer2_bitmap_t)2 <<
			  (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
		/* (32K-256K) 4, 8, 16, 32 bits per allocation block */
	}
	/*
	 * Use the linear iterator to pack small allocations, otherwise
	 * fall-back to finding a free 16KB chunk.  The linear iterator
	 * is only valid when *NOT* on a freemap chunking boundary (16KB).
	 * If it is the bitmap must be scanned.  It can become invalid
	 * once we pack to the boundary.  We adjust it after a bitmap
	 * allocation only for sub-16KB allocations (so the perfectly good
	 * previous value can still be used for fragments when 16KB+
	 * allocations are made).
	 *
	 * Beware of hardware artifacts when bmradix == 64 (intermediate
	 * result can wind up being '1' instead of '0' if hardware masks
	 * the shift count).
	 *
	 * NOTE: j needs to be even in the j= calculation.  As an artifact
	 *	 of the /2 division, our bitmask has to clear bit 0.
	 *
	 * NOTE: TODO this can leave little unallocatable fragments lying
	 *	 around.
	 */
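
	/*
	 * Illustrative example (added; not in the original source), assuming
	 * the 16KB freemap blocks and 4MB/8-element bitmap layout described
	 * in the comments in this file: a 4KB allocation with bmap->linear
	 * at 0x91000 takes the linear path below and computes
	 *
	 *	i      = 0x91000 / (4MB / 8)          = 1	(bitmap element)
	 *	j      = (0x91000 / (16KB / 2)) & 30  = 8	(starting bit pair)
	 *	bmmask = ((1 << 2) - 1) << 8          = 0x300
	 *
	 * after which bmap->linear advances to 0x92000.
	 */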
	if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
	    HAMMER2_FREEMAP_BLOCK_SIZE &&
	    (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
	    bmap->linear < HAMMER2_SEGSIZE) {
		KKASSERT(bmap->linear >= 0 &&
			 bmap->linear + size <= HAMMER2_SEGSIZE &&
			 (bmap->linear & (HAMMER2_ALLOC_MIN - 1)) == 0);
		offset = bmap->linear;
		i = offset / (HAMMER2_SEGSIZE / 8);
		j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
		bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			 HAMMER2_BMAP_ALLONES :
			 ((hammer2_bitmap_t)1 << bmradix) - 1;
		bmmask <<= j;
		bmap->linear = offset + size;
	} else {
		for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) {
			bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
				 HAMMER2_BMAP_ALLONES :
				 ((hammer2_bitmap_t)1 << bmradix) - 1;
			for (j = 0;
			     j < HAMMER2_BMAP_BITS_PER_ELEMENT;
			     j += bmradix) {
				if ((bmap->bitmapq[i] & bmmask) == 0)
					goto success;
				bmmask <<= bmradix;
			}
		}
		/*fragments might remain*/
		/*KKASSERT(bmap->avail == 0);*/
		return ENOSPC;
success:
		offset = i * (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS) +
			 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
		if (size & HAMMER2_FREEMAP_BLOCK_MASK)
			bmap->linear = offset + size;
	}

	/* 8 x (64/2) -> 256 x 16K -> 4MB */
	KKASSERT(i >= 0 && i < HAMMER2_BMAP_ELEMENTS);
	/*
	 * Optimize the buffer cache to avoid unnecessary read-before-write
	 * operations.
	 *
	 * The device block size could be larger than the allocation size
	 * so the actual bitmap test is somewhat more involved.  We have
	 * to use a compatible buffer size for this operation.
	 */
	if ((bmap->bitmapq[i] & bmmask) == 0 &&
	    hammer2_devblksize(size) != size) {
		size_t psize = hammer2_devblksize(size);
		hammer2_off_t pmask = (hammer2_off_t)psize - 1;
		int pbmradix = (hammer2_bitmap_t)2 <<
			       (hammer2_devblkradix(radix) -
				HAMMER2_FREEMAP_BLOCK_RADIX);
		hammer2_bitmap_t pbmmask;
		int pradix = hammer2_getradix(psize);

		pbmmask = (pbmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			HAMMER2_BMAP_ALLONES :
			((hammer2_bitmap_t)1 << pbmradix) - 1;
		while ((pbmmask & bmmask) == 0)
			pbmmask <<= pbmradix;

#if FREEMAP_DEBUG
		kprintf("%016jx mask %016jx %016jx %016jx (%zd/%zd)\n",
			*basep + offset, bmap->bitmapq[i],
			pbmmask, bmmask, size, psize);
#endif

		if ((bmap->bitmapq[i] & pbmmask) == 0) {
			hammer2_io_newq(hmp, HAMMER2_BREF_TYPE_FREEMAP_LEAF,
					(*basep + (offset & ~pmask)) |
					pradix, psize);
		}
	}
	/*
	 * When initializing a new inode segment also attempt to initialize
	 * an adjacent segment.  Be careful not to index beyond the array
	 * bounds.
	 *
	 * We do this to try to localize inode accesses to improve
	 * directory scan rates.  XXX doesn't improve scan rates.
	 */
	if (size == HAMMER2_INODE_BYTES) {
		if (n & 1) {
			if (bmap[-1].radix == 0 && bmap[-1].avail)
				bmap[-1].radix = radix;
		} else {
			if (bmap[1].radix == 0 && bmap[1].avail)
				bmap[1].radix = radix;
		}
	}
	/*
	 * Calculate the bitmap-granular change in bgsize for the volume
	 * header.  We cannot use the fine-grained change here because
	 * the bulkfree code can't undo it.  If the bitmap element is already
	 * marked allocated it has already been accounted for.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		if (bmap->bitmapq[i] & bmmask)
			bgsize = 0;
		else
			bgsize = HAMMER2_FREEMAP_BLOCK_SIZE;
	} else {
		bgsize = size;
	}

	/*
	 * Adjust the bitmap, set the class (it might have been 0),
	 * and available bytes, update the allocation offset (*basep)
	 * from the L0 base to the actual offset.
	 *
	 * avail must reflect the bitmap-granular availability.  The allocator
	 * tests will also check the linear iterator.
	 */
	bmap->bitmapq[i] |= bmmask;
	bmap->class = class;
	bmap->avail -= bgsize;
	*basep += offset;

	/*
	 * Adjust the volume header's allocator_free parameter.  This
	 * parameter has to be fixed up by bulkfree which has no way to
	 * figure out sub-16K chunking, so it must be adjusted by the
	 * bitmap-granular size.
	 */
	if (bgsize) {
		hammer2_voldata_lock(hmp);
		hammer2_voldata_modify(hmp);
		hmp->voldata.allocator_free -= bgsize;
		hammer2_voldata_unlock(hmp);
	}

	return 0;
}
static void
hammer2_freemap_init(hammer2_dev_t *hmp, hammer2_key_t key,
		     hammer2_chain_t *chain)
{
	hammer2_off_t l1size;
	hammer2_key_t lokey;
	hammer2_key_t hikey;
	hammer2_bmap_data_t *bmap;
	int count;

	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	/*
	 * Calculate the portion of the 2GB map that should be initialized
	 * as free.  Portions below or after will be initialized as allocated.
	 * SEGMASK-align the areas so we don't have to worry about sub-scans
	 * or endianness when using memset.
	 *
	 * (1) Ensure that all statically allocated space from newfs_hammer2
	 *     is marked allocated.
	 *
	 * (2) Ensure that the reserved area is marked allocated (typically
	 *     the first 4MB of the 2GB area being represented).
	 *
	 * (3) Ensure that any trailing space at the end-of-volume is marked
	 *     allocated.
	 *
	 * WARNING! It is possible for lokey to be larger than hikey if the
	 *	    entire 2GB segment is within the static allocation.
	 */
	lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
		~HAMMER2_SEGMASK64;

	if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		    HAMMER2_ZONE_SEG64) {
		lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
			HAMMER2_ZONE_SEG64;
	}

	hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	if (hikey > hmp->voldata.volu_size) {
		hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;
	}
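
	/*
	 * Illustrative note (added; not in the original source): lokey and
	 * hikey bound the freeable portion of this 2GB zone.  lokey is pushed
	 * above both the newfs_hammer2 static allocation and the zone's 4MB
	 * reserved segment, and hikey is pulled back to the SEGMASK-aligned
	 * end of the volume; everything outside [lokey, hikey) is pre-marked
	 * as allocated by the loop below.
	 */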
	chain->bref.check.freemap.avail =
		H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	bmap = &chain->data->bmdata[0];
	for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
		if (key < lokey || key >= hikey) {
			memset(bmap->bitmapq, -1,
			       sizeof(bmap->bitmapq));
			bmap->avail = 0;
			bmap->linear = HAMMER2_SEGSIZE;
			chain->bref.check.freemap.avail -=
				H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
		} else {
			bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
		}
		key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
		++bmap;
	}
}
/*
 * The current Level 1 freemap has been exhausted, iterate to the next
 * one, return ENOSPC if no freemaps remain.
 *
 * XXX this should rotate back to the beginning to handle freed-up space
 * XXX or use intermediate entries to locate free space. TODO
 */
static int
hammer2_freemap_iterate(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter)
{
	hammer2_dev_t *hmp = (*parentp)->hmp;

	iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
	iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	if (iter->bnext >= hmp->voldata.volu_size) {
		iter->bnext = 0;
		if (++iter->loops == 2)
			return ENOSPC;
	}
	return EAGAIN;
}
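
/*
 * Note (added for clarity, not in the original source):
 * hammer2_freemap_iterate() above rounds bnext up to the next LEVEL1 (2GB)
 * boundary.  When bnext runs past the end of the volume it wraps to 0, and a
 * second complete pass (iter->loops == 2) reports ENOSPC; otherwise EAGAIN
 * sends the retry loop in hammer2_freemap_alloc() back into
 * hammer2_freemap_try_alloc().
 */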
/*
 * Adjust the bit-pattern for data in the freemap bitmap according to
 * (how).  This code is called from on-mount recovery to fixup (mark
 * as allocated) blocks whose freemap updates might not have been committed
 * in the last crash and is used by the bulk freemap scan to stage frees.
 *
 * WARNING! Cannot be called with an empty-data bref (radix == 0).
 *
 * XXX currently disabled when how == 0 (the normal real-time case).  At
 * the moment we depend on the bulk freescan to actually free blocks.  It
 * will still call this routine with a non-zero how to stage possible frees
 * and to do the actual free.
 */
void
hammer2_freemap_adjust(hammer2_dev_t *hmp, hammer2_blockref_t *bref,
		       int how)
{
	hammer2_off_t data_off = bref->data_off;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_bmap_data_t *bmap;
	hammer2_key_t key;
	hammer2_key_t key_dummy;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_tid_t mtid;
	hammer2_bitmap_t *bitmap;
	const hammer2_bitmap_t bmmask00 = 0;
	hammer2_bitmap_t bmmask01;
	hammer2_bitmap_t bmmask10;
	hammer2_bitmap_t bmmask11;
	size_t bytes;
	size_t bgsize = 0;
	uint16_t class;
	int radix;
	int start;
	int count;
	int modified = 0;
	int cache_index = -1;
	int error;
== HAMMER2_FREEMAP_DORECOVER
);
862 mtid
= hammer2_trans_sub(hmp
->spmp
);
864 radix
= (int)data_off
& HAMMER2_OFF_MASK_RADIX
;
865 KKASSERT(radix
!= 0);
866 data_off
&= ~HAMMER2_OFF_MASK_RADIX
;
867 KKASSERT(radix
<= HAMMER2_RADIX_MAX
);
870 bytes
= (size_t)1 << radix
;
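
	/*
	 * Worked example (added; not in the original source): a bref with
	 * data_off == 0x0000000040a2c00e unpacks to radix 0x0e (16KB) in the
	 * low HAMMER2_OFF_MASK_RADIX bits and a media offset of
	 * 0x0000000040a2c000 once those bits are masked off, matching the
	 * (key | radix) encoding produced by the allocator above.
	 */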
	class = (bref->type << 8) | hammer2_devblkradix(radix);
	/*
	 * We can't adjust the freemap for data allocations made by
	 * newfs_hammer2.
	 */
	if (data_off < hmp->voldata.allocator_beg)
		return;

	KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
	/*
	 * Lookup the level1 freemap chain.  The chain must exist.
	 */
	key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1mask = l1size - 1;

	parent = &hmp->fchain;
	hammer2_chain_ref(parent);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);

	chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
				     &cache_index,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);
	/*
	 * Stop early if we are trying to free something but no leaf exists.
	 */
	if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
		kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
			(intmax_t)bref->data_off);
		goto done;
	}
	if (chain && chain->error) {
		kprintf("hammer2_freemap_adjust: %016jx: error %s\n",
			(intmax_t)bref->data_off,
			hammer2_error_str(chain->error));
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		goto done;
	}
	/*
	 * Create any missing leaf(s) if we are doing a recovery (marking
	 * the block(s) as being allocated instead of being freed).  Be sure
	 * to initialize the auxiliary freemap tracking info in the
	 * bref.check.freemap structure.
	 */
	if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
		error = hammer2_chain_create(&parent, &chain,
				     hmp->spmp, HAMMER2_METH_DEFAULT,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE,
				     mtid, 0, 0);

		if (hammer2_debug & 0x0040) {
			kprintf("fixup create chain %p %016jx:%d\n",
				chain, chain->bref.key, chain->bref.keybits);
		}

		hammer2_chain_modify(chain, mtid, 0, 0);
		bzero(&chain->data->bmdata[0],
		      HAMMER2_FREEMAP_LEVELN_PSIZE);
		chain->bref.check.freemap.bigmask = (uint32_t)-1;
		chain->bref.check.freemap.avail = l1size;
		/* bref.methods should already be inherited */

		hammer2_freemap_init(hmp, key, chain);
	}
	/* XXX handle error */
#if FREEMAP_DEBUG
	kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		chain->bref.type, chain->bref.key,
		chain->bref.keybits, chain->bref.data_off);
#endif
	/*
	 * Calculate the bitmask (runs in 2-bit pairs).
	 */
	start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
	bmmask01 = (hammer2_bitmap_t)1 << start;
	bmmask10 = (hammer2_bitmap_t)2 << start;
	bmmask11 = (hammer2_bitmap_t)3 << start;
	/*
	 * Fixup the bitmap.  Partial blocks cannot be fully freed unless
	 * a bulk scan is able to roll them up.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		count = 1;
		if (how == HAMMER2_FREEMAP_DOREALFREE)
			how = HAMMER2_FREEMAP_DOMAYFREE;
	} else {
		count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
	}
	/*
	 * [re]load the bmap and bitmap pointers.  Each bmap entry covers
	 * a 2MB swath.  The bmap itself (LEVEL1) covers 2GB.
	 *
	 * Be sure to reset the linear iterator to ensure that the adjustment
	 * is not ignored.
	 */
	bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
				    (HAMMER2_FREEMAP_COUNT - 1)];
	bitmap = &bmap->bitmapq[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];
	bmap->linear = HAMMER2_SEGSIZE;
	if (how == HAMMER2_FREEMAP_DORECOVER) {
		/*
		 * Recovery request, mark as allocated.
		 */
		if ((*bitmap & bmmask11) != bmmask11) {
			modified = 1;
			hammer2_chain_modify(chain, mtid, 0, 0);

			if ((*bitmap & bmmask11) == bmmask00) {
				bmap->avail -=
					HAMMER2_FREEMAP_BLOCK_SIZE;
				bgsize += HAMMER2_FREEMAP_BLOCK_SIZE;
			}
			if (bmap->class == 0)
				bmap->class = class;
			*bitmap |= bmmask11;
			if (hammer2_debug & 0x0040) {
				kprintf("hammer2_freemap_recover: "
					"fixup type=%02x "
					"block=%016jx/%zd\n",
					bref->type, data_off, bytes);
			}
		} else {
			if (hammer2_debug & 0x0040) {
				kprintf("hammer2_freemap_recover: good "
					"type=%02x block=%016jx/%zd\n",
					bref->type, data_off, bytes);
			}
		}
	}
#if 0
	/*
	 * XXX this stuff doesn't work, avail is miscalculated and
	 * code 10 means something else now.
	 */
	else if ((*bitmap & bmmask11) == bmmask11) {
		/*
		 * Mayfree/Realfree request and bitmap is currently
		 * marked as being fully allocated.
		 */
		if (!modified) {
			hammer2_chain_modify(chain, 0);
			modified = 1;
		}
		if (how == HAMMER2_FREEMAP_DOREALFREE)
			*bitmap &= ~bmmask11;
		else
			*bitmap = (*bitmap & ~bmmask11) | bmmask10;
	} else if ((*bitmap & bmmask11) == bmmask10) {
		/*
		 * Mayfree/Realfree request and bitmap is currently
		 * marked as being possibly freeable.
		 */
		if (how == HAMMER2_FREEMAP_DOREALFREE) {
			if (!modified) {
				hammer2_chain_modify(chain, 0);
				modified = 1;
			}
			*bitmap &= ~bmmask11;
		}
	} else {
		/*
		 * 01 - Not implemented, currently illegal state
		 * 00 - Not allocated at all, illegal free.
		 */
		panic("hammer2_freemap_adjust: "
		      "Illegal state %08x(%08x)",
		      *bitmap, *bitmap & bmmask11);
	}
#endif
#if HAMMER2_BMAP_ELEMENTS != 8
#error "hammer2_freemap.c: HAMMER2_BMAP_ELEMENTS expected to be 8"
#endif

	if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
		bmap->avail += 1 << radix;
		KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
		if (bmap->avail == HAMMER2_SEGSIZE &&
		    bmap->bitmapq[0] == 0 &&
		    bmap->bitmapq[1] == 0 &&
		    bmap->bitmapq[2] == 0 &&
		    bmap->bitmapq[3] == 0 &&
		    bmap->bitmapq[4] == 0 &&
		    bmap->bitmapq[5] == 0 &&
		    bmap->bitmapq[6] == 0 &&
		    bmap->bitmapq[7] == 0) {
			key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
			kprintf("Freeseg %016jx\n", (intmax_t)key);
			bmap->class = 0;
		}
	}
	/*
	 * chain->bref.check.freemap.bigmask (XXX)
	 *
	 * Setting bigmask is a hint to the allocation code that there might
	 * be something allocatable.  We also set this in recovery... it
	 * doesn't hurt and we might want to use the hint for other validation
	 * operations later on.
	 */
	chain->bref.check.freemap.bigmask |= 1 << radix;

	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);
done:
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);
	if (bgsize) {
		hammer2_voldata_lock(hmp);
		hammer2_voldata_modify(hmp);
		hmp->voldata.allocator_free -= bgsize;
		hammer2_voldata_unlock(hmp);
	}
}
1122 * stage-1 ALLOCATED -> POSSIBLY FREE
1123 * POSSIBLY FREE -> POSSIBLY FREE (type corrected)
1125 * This transitions bitmap entries from ALLOCATED to POSSIBLY FREE.
1126 * The POSSIBLY FREE state does not mean that a block is actually free
1127 * and may be transitioned back to ALLOCATED in stage-2.
1129 * This is typically done during normal filesystem operations when
1130 * something is deleted or a block is replaced.
1132 * This is done by bulkfree in-bulk after a memory-bounded meta-data
1133 * scan to try to determine what might be freeable.
1135 * This can be done unconditionally through a freemap scan when the
1136 * intention is to brute-force recover the proper state of the freemap.
1138 * stage-2 POSSIBLY FREE -> ALLOCATED (scan metadata topology)
1140 * This is done by bulkfree during a meta-data scan to ensure that
1141 * all blocks still actually allocated by the filesystem are marked
1144 * NOTE! Live filesystem transitions to POSSIBLY FREE can occur while
1145 * the bulkfree stage-2 and stage-3 is running. The live filesystem
1146 * will use the alternative POSSIBLY FREE type (2) to prevent
1147 * stage-3 from improperly transitioning unvetted possibly-free
1150 * stage-3 POSSIBLY FREE (type 1) -> FREE (scan freemap)
1152 * This is done by bulkfree to finalize POSSIBLY FREE states.