/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * HAMMER blockmap
 */
#include <vm/vm_page2.h>

#include "hammer.h"
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
				    hammer_off_t base_offset, int zone,
				    hammer_blockmap_layer2_t layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);
static int hammer_check_volume(hammer_mount_t, hammer_off_t *);
static void hammer_skip_volume(hammer_off_t *offsetp);
/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);
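
/*
 * (Editorial note, hedged: RB_GENERATE2 expands the red-black tree
 * operations plus a direct keyed lookup, so a reservation covering a
 * big-block can be found by its zone-2 base offset, e.g.:
 *
 *	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
 *
 * rb_node is the linkage field inside struct hammer_reserve and
 * zone_offset is the hammer_off_t key ordered by hammer_res_rb_compare().)
 */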
static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}
/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;
	hmp = trans->hmp;

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(hammer_is_index_record(zone));
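
	/*
	 * (Editorial note, hedged: HAMMER_DATA_DOALIGN rounds the request
	 * up to HAMMER's 16-byte data alignment, so for example a 100-byte
	 * request becomes 112 bytes; HAMMER_XBUFSIZE caps a single
	 * allocation at the large-buffer size.)
	 */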
	root_volume = trans->rootvol;

	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = HAMMER_DATA_DOALIGN_WITH(hammer_off_t, hint);
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
again:
	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				"allocation\n", zone);
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}
	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;
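
	/*
	 * (Editorial note: the (a ^ b) & ~MASK tests above are the usual
	 * same-aligned-block check.  XOR clears every bit the two offsets
	 * share, so any surviving bit above the mask means next_offset and
	 * tmp_offset land in different buffers/big-blocks.  Assuming 16KB
	 * HAMMER buffers, 0x3ff0 ^ 0x4002 = 0x7ff2, which survives
	 * ~HAMMER_BUFMASK64, so such a request is pushed to the start of
	 * the next buffer.)
	 */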
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);

	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = HAMMER_ZONE_LAYER1_NEXT_OFFSET(next_offset);
		if (hammer_check_volume(hmp, &next_offset)) {
			result_offset = 0;
			goto failed;
		}
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Skip the whole volume if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system.  This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if (HAMMER_VOL_DECODE(layer1->phys_offset) == hmp->volume_to_remove) {
		hammer_skip_volume(&next_offset);
		goto again;
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
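
	/*
	 * (Editorial note, a hedged sketch of the translation assumed
	 * above: the freemap is a two-level radix tree.  A layer1 entry
	 * points at an array of layer2 entries and each layer2 entry
	 * describes one 8MB big-block, roughly:
	 *
	 *	l1_index = (off / HAMMER_BIGBLOCK_SIZE) / HAMMER_BLOCKMAP_RADIX2
	 *	l2_index = (off / HAMMER_BIGBLOCK_SIZE) % HAMMER_BLOCKMAP_RADIX2
	 *
	 * The LAYER1_OFFSET/LAYER2_OFFSET macros fold those indices into
	 * byte offsets within the on-media layer structures.)
	 */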
	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_BIGBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_BIGBLOCK_MASK64)) {
		if (offset >= HAMMER_BIGBLOCK_OVERFILL) {
			next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}
	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = HAMMER_ZONE_LAYER2_NEXT_OFFSET(next_offset);
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	}
	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		hammer_crc_set_layer1(layer1);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);

	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	hammer_crc_set_layer2(layer2);
	hammer_modify_buffer_done(buffer2);

	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation
	 * as appropriate.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}
	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew_ext().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
	}
	result_offset = next_offset;
	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * need to save it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume_noundo(NULL, root_volume);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);

failed:

	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}
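
/*
 * (Editorial note, a hedged usage sketch; transaction setup and error
 * handling elided.  A backend caller typically does something like:
 *
 *	hammer_off_t off;
 *	int error;
 *
 *	off = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
 *				    sizeof(struct hammer_node_ondisk),
 *				    hint, &error);
 *	if (off == 0)
 *		handle error, typically ENOSPC;
 *
 * A zero result with *errorp set indicates failure.)
 */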
/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for big-blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx = NULL;
	int loops = 0;
	int offset;

	KKASSERT(hammer_is_index_record(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);

	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:
	resv = NULL;

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				"reservation\n", zone);
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}
	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = HAMMER_ZONE_LAYER1_NEXT_OFFSET(next_offset);
		if (hammer_check_volume(hmp, &next_offset))
			goto failed;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = HAMMER_ZONE_LAYER2_NEXT_OFFSET(next_offset);
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;
	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_page_count_min(HAMMER_BUFSIZE / PAGE_SIZE)) {
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
			if (*errorp)
				goto failed;
		}
	}

	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);
failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;

	return(resv);
}
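
/*
 * (Editorial note, hedged: the reservation returned above pins the
 * big-block for the frontend.  The typical life cycle, with details
 * elided, is:
 *
 *	resv = hammer_blockmap_reserve(hmp, zone, bytes, &zone_off, &error);
 *	... issue direct write I/O against zone_off ...
 *	hammer_blockmap_finalize(trans, resv, zone_off, bytes);	backend
 *	hammer_blockmap_reserve_complete(hmp, resv);		drop ref
 *
 * which is the pairing the function comments in this file describe.)
 */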
/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except we only adjust the bytes_free field and don't touch the append
 * offset.  The finalization mechanic for dedup reservations is the same
 * as for normal write ones - the backend finalizes the reservation with
 * hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
			      hammer_off_t zone_offset, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv = NULL;
	hammer_reserve_t resx = NULL;

	KKASSERT(hammer_is_index_record(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Fail if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone)
		goto failed;

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		goto failed;
	}
	base_off = hammer_xlate_to_zone2(zone_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resv, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->bytes_free = layer2->bytes_free;
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resx, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			kfree(resx, hmp->m_misc);
			goto failed;
		}
		resx->zone_offset = base_off;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}

	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	hammer_rel_volume(root_volume, 0);

	return(resv);
}
static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
	int32_t temp;

	/*
	 * Big-block underflow check
	 */
	temp = resv->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence(); /* XXX do we really need it ? */
	if (temp > resv->bytes_free) {
		hdkprintf("BIGBLOCK UNDERFLOW\n");
		return (0);
	}

	resv->bytes_free -= bytes;
	return (1);
}
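
/*
 * (Editorial note, hedged: the check above exploits 32-bit arithmetic
 * wraparound.  bytes_free legitimately ranges over roughly
 * [-HAMMER_BIGBLOCK_SIZE, HAMMER_BIGBLOCK_SIZE] because of de-dup, so
 * subtracting 2 * HAMMER_BIGBLOCK_SIZE (16MB) from any sane value stays
 * smaller than the original.  Only when bytes_free is already deep in
 * negative territory does the subtraction wrap positive, making
 * temp > resv->bytes_free true and flagging the underflow.)
 */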
/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT(hammer_is_zone_raw_buffer(resv->zone_offset));
	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_BIGBLOCK_SIZE;
		base_offset = hammer_xlate_to_zoneX(resv->zone, resv->zone_offset);
		if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
			hammer_dedup_cache_inval(hmp, base_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_BIGBLOCK_SIZE,
					   1);
		if (hammer_debug_general & 0x20000) {
			hkprintf("delbgblk %016jx error %d\n",
				(intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			hkprintf("delresvr %016jx zone %02x\n",
				(intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}
/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
			       int zone, hammer_blockmap_layer2_t layer2)
{
	hammer_reserve_t resv;
	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: need lock in future around resv lookup/allocation and
	 * the setdelay call, currently refs is not bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_BIGBLOCK_SIZE;

		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}
/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}
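
/*
 * (Editorial note, hedged: flg_no tags the reservation with the next
 * flusher sequence number, and hammer_reserve_clrdelay() below runs once
 * the flusher has passed that group.  A big-block freed in flush group N
 * therefore cannot be reallocated until group N+1 has fully committed,
 * which is what protects crash recovery from resurrected data blocks.)
 */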
/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}
/*
 * Backend function - free (offset, bytes) in a zone.
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;
	if (bytes == 0)
		return;
	hmp = trans->hmp;

	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);
	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffer's are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the meantime, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * occurring.
	 */
	if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						 ~HAMMER_BIGBLOCK_MASK64);

		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			hammer_crc_set_layer1(layer1);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
					trans->rootvol,
					vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
				root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	hammer_crc_set_layer2(layer2);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}
void
hammer_blockmap_dedup(hammer_transaction_t trans,
		      hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone __debugvar;
	if (bytes == 0)
		return;
	hmp = trans->hmp;

	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_BIGBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence(); /* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		hdkprintf("BIGBLOCK UNDERFLOW\n");
	} else {
		layer2->bytes_free -= bytes;
	}
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	hammer_crc_set_layer2(layer2);

	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}
/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;
	if (bytes == 0)
		return(0);
	hmp = trans->hmp;

	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		hammer_crc_set_layer1(layer1);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
				trans->rootvol,
				vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		hdkprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;

	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_BIGBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;

	hammer_crc_set_layer2(layer2);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return(error);
}
/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t bytes;
	int zone;

	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp) {
		*curp = 0;
		return(0);
	}
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;
	/*
	 * *curp becomes 1 only when there is no error and next_offset
	 * and zone_offset are in the same big-block.
	 */
	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_BIGBLOCK_MASK64)
		*curp = 0;	/* not same */
	else
		*curp = 1;
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x4000) {
		hdkprintf("%016jx -> %d\n", (intmax_t)zone_offset, bytes);
	}
	return(bytes);
}
/*
 * Lookup a blockmap offset and verify blockmap layers.
 */
hammer_off_t
hammer_blockmap_lookup_verify(hammer_mount_t hmp, hammer_off_t zone_offset,
			      int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv __debugvar;
	int zone;
	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	result_offset = hammer_xlate_to_zone2(zone_offset);

	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	if (layer2->zone == 0) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						 ~HAMMER_BIGBLOCK_MASK64);
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		hpanic("bad zone %d/%d", layer2->zone, zone);
	}
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		hdkprintf("%016jx -> %016jx\n",
			(intmax_t)zone_offset, (intmax_t)result_offset);
	}
	return(result_offset);
}
/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;
= hmp
->rsv_inodes
* in_size
+
1500 hmp
->rsv_recs
* rec_size
+
1501 hmp
->rsv_databytes
+
1502 ((int64_t)hmp
->rsv_fromdelay
<< HAMMER_BIGBLOCK_BITS
) +
1503 ((int64_t)hammer_limit_dirtybufspace
) +
1504 (slop
<< HAMMER_BIGBLOCK_BITS
);
1509 if (hmp
->copy_stat_freebigblocks
>=
1510 (usedbytes
>> HAMMER_BIGBLOCK_BITS
)) {
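
/*
 * (Editorial note, hedged: the heuristic above charges every reserved
 * inode and record at a worst-case B-Tree footprint, adds reserved data
 * bytes and the dirty-buffer limit, and converts delayed-reuse
 * big-blocks plus the slop argument to bytes by shifting by
 * HAMMER_BIGBLOCK_BITS (8MB units).  The mount is considered full when
 * the projected usage, expressed in big-blocks, exceeds the cached free
 * big-block count.)
 */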
static int
hammer_check_volume(hammer_mount_t hmp, hammer_off_t *offsetp)
{
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_buffer_t buffer1 = NULL;
	hammer_off_t layer1_offset;
	int error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(*offsetp);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto end;

	/*
	 * No more physically available space in layer1s
	 * of the current volume, go to the next volume.
	 */
	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
		hammer_skip_volume(offsetp);
end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	return(error);
}
static void
hammer_skip_volume(hammer_off_t *offsetp)
{
	hammer_off_t offset;
	int zone;
	int vol_no;

	offset = *offsetp;
	zone = HAMMER_ZONE_DECODE(offset);
	vol_no = HAMMER_VOL_DECODE(offset) + 1;
	KKASSERT(vol_no <= HAMMER_MAX_VOLUMES);

	if (vol_no == HAMMER_MAX_VOLUMES) {	/* wrap */
		vol_no = 0;
		++zone;
	}

	*offsetp = HAMMER_ENCODE(zone, vol_no, 0);
}
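
/*
 * (Editorial note, hedged: a hammer_off_t packs a 4-bit zone, an 8-bit
 * volume number and a 52-bit byte offset.  Bumping vol_no and
 * re-encoding with HAMMER_ENCODE(zone, vol_no, 0) restarts the scan at
 * byte 0 of the next volume; wrapping past the last volume increments
 * the zone, which the end-of-zone checks in the callers then catch.)
 */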