/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.15 2008/06/07 07:41:51 dillon Exp $
 */
#include "hammer.h"

static hammer_off_t hammer_find_hole(hammer_mount_t hmp,
                                     hammer_holes_t holes, int bytes);
static void hammer_add_hole(hammer_mount_t hmp, hammer_holes_t holes,
                            hammer_off_t offset, int bytes);
static void hammer_clean_holes(hammer_mount_t hmp, hammer_holes_t holes,
                               hammer_off_t offset);
/*
 * Allocate a big-block from the freemap and stuff it into the blockmap.
 */
static void
hammer_blockmap_llalloc(hammer_transaction_t trans,
                        hammer_off_t zone_offset, int *errorp,
                        hammer_buffer_t buffer1, hammer_blockmap_layer1_t layer1,
                        hammer_buffer_t buffer2, hammer_blockmap_layer2_t layer2)
{
        hammer_off_t zone2_offset;

        zone2_offset = hammer_freemap_alloc(trans, zone_offset, errorp);

        hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
        KKASSERT(layer1->blocks_free);
        --layer1->blocks_free;
        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        hammer_modify_buffer_done(buffer1);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
        bzero(layer2, sizeof(*layer2));
        layer2->u.phys_offset = zone2_offset;
        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
}
/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
                      int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        int loops = 0;
        int skip_amount;
        int used_hole;
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(trans->hmp, errorp);
        if (*errorp)
                return(0);

        rootmap = &trans->hmp->blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes > 0 && bytes <= HAMMER_BUFSIZE);
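        /*
         * For illustration (hypothetical request size): the rounding above
         * turns a 13-byte request into (13 + 7) & ~7 == 16, while an
         * already-aligned 24-byte request is unchanged.  The KKASSERT then
         * bounds the rounded request to a single 16K buffer (HAMMER_BUFSIZE),
         * since a blockmap allocation may not span buffers.
         */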
        lockmgr(&trans->hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);

        /*
         * Try to use a known-free hole, otherwise append.
         */
        next_offset = hammer_find_hole(trans->hmp, &trans->hmp->holes[zone],
                                       bytes);
        if (next_offset == 0) {
                next_offset = rootmap->next_offset;
                used_hole = 0;
        } else {
                used_hole = 1;
        }
again:
        /*
         * The allocation request may not cross a buffer boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                skip_amount = HAMMER_BUFSIZE -
                              ((int)next_offset & HAMMER_BUFMASK);
                hammer_add_hole(trans->hmp, &trans->hmp->holes[zone],
                                next_offset, skip_amount);
                next_offset = tmp_offset & ~HAMMER_BUFMASK64;
        }
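        /*
         * For illustration (hypothetical offsets, assuming the usual 16K
         * buffers so HAMMER_BUFMASK is 0x3fff): if next_offset ends in
         * 0x3ff0 and the request is 64 bytes, tmp_offset ends in 0x402f.
         * The two offsets differ above the buffer mask, so the request
         * would cross a buffer boundary.  The 16 bytes left in the current
         * buffer (skip_amount) are recorded as a hole for later small
         * requests and next_offset is advanced to the start of the next
         * buffer via tmp_offset & ~HAMMER_BUFMASK64.
         */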
        /*
         * Dive layer 1.  If we are starting a new layer 1 entry,
         * allocate a layer 2 block for it.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(trans->hmp, layer1_offset, errorp, &buffer1);
        KKASSERT(*errorp == 0);
        KKASSERT(next_offset <= rootmap->alloc_offset);

        /*
         * Check CRC if not allocating into uninitialized space
         */
        if ((next_offset != rootmap->alloc_offset) ||
            (next_offset & HAMMER_BLOCKMAP_LAYER2_MASK)) {
                if (layer1->layer1_crc != crc32(layer1,
                                                HAMMER_LAYER1_CRCSIZE)) {
                        Debugger("CRC FAILED: LAYER1");
                }
        }
        /*
         * Allocate layer2 backing store in layer1 if necessary.  next_offset
         * can skip to a bigblock boundary but alloc_offset is at least
         * bigblock-aligned so that's ok.
         */
        if ((next_offset == rootmap->alloc_offset &&
             (next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) ||
            layer1->phys_offset == HAMMER_BLOCKMAP_FREE) {
                KKASSERT((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0);
                hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset =
                        hammer_freemap_alloc(trans, next_offset, errorp);
                layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                KKASSERT(*errorp == 0);
        }
        KKASSERT(layer1->phys_offset);
        /*
         * If layer1 indicates no free blocks in layer2 and our alloc_offset
         * is not in layer2, skip layer2 entirely.
         */
        if (layer1->blocks_free == 0 &&
            ((next_offset ^ rootmap->alloc_offset) & ~HAMMER_BLOCKMAP_LAYER2_MASK) != 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2_MASK) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                if (next_offset >= trans->hmp->zone_limits[zone]) {
                        hkprintf("blockmap wrap1\n");
                        next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                        if (++loops == 2) {     /* XXX poor-man's */
                                next_offset = 0;
                                *errorp = ENOSPC;
                                goto done;
                        }
                }
                goto again;
        }
        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(trans->hmp, layer2_offset, errorp, &buffer2);
        KKASSERT(*errorp == 0);
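        /*
         * Structural sketch of the two dives above (summary only, the
         * macro internals live elsewhere in the HAMMER headers):
         *
         *      layer1 entry = rootmap->phys_offset +
         *                     HAMMER_BLOCKMAP_LAYER1_OFFSET(zone offset)
         *      layer2 entry = layer1->phys_offset +
         *                     HAMMER_BLOCKMAP_LAYER2_OFFSET(zone offset)
         *      big-block    = layer2->u.phys_offset    (zone-2 offset)
         *      byte address = layer2->u.phys_offset +
         *                     (zone offset & HAMMER_LARGEBLOCK_MASK64)
         *
         * hammer_blockmap_lookup() below resolves a zone-N offset to a
         * zone-2 (raw buffer) offset using exactly the last step.
         */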
        /*
         * Check CRC if not allocating into uninitialized space
         */
        if (next_offset != rootmap->alloc_offset ||
            (next_offset & HAMMER_LARGEBLOCK_MASK64)) {
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                        Debugger("CRC FAILED: LAYER2");
                }
        }
        if ((next_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                /*
                 * We are at the beginning of a new bigblock
                 */
                if (next_offset == rootmap->alloc_offset ||
                    layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
                        /*
                         * Allocate the bigblock in layer2 if diving into
                         * uninitialized space or if the block was previously
                         * freed.
                         */
                        hammer_blockmap_llalloc(trans,
                                                next_offset, errorp,
                                                buffer1, layer1,
                                                buffer2, layer2);
                        KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
                } else if (layer2->bytes_free != HAMMER_LARGEBLOCK_SIZE) {
                        /*
                         * We have encountered a block that is already
                         * partially allocated.  We must skip this block.
                         */
                        next_offset += HAMMER_LARGEBLOCK_SIZE;
                        if (next_offset >= trans->hmp->zone_limits[zone]) {
                                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                                hkprintf("blockmap wrap2\n");
                                if (++loops == 2) {     /* XXX poor-man's */
                                        next_offset = 0;
                                        *errorp = ENOSPC;
                                        goto done;
                                }
                        }
                        goto again;
                }
        } else {
                /*
                 * We are appending within a bigblock.  It is possible that
                 * the blockmap has been marked completely free via a prior
                 * pruning operation.  We no longer reset the append index
                 * for that case because it compromises the UNDO by allowing
                 * new allocations to overwrite deleted data still subject
                 * to undo on reboot.
                 */
                KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
        }
        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free -= bytes;
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        KKASSERT(layer2->bytes_free >= 0);
        /*
         * If the buffer was completely free we do not have to read it from
         * disk, call hammer_bnew() to instantiate it.
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                bigblock_offset = layer2->u.phys_offset +
                                  (next_offset & HAMMER_LARGEBLOCK_MASK64);
                hammer_bnew(trans->hmp, bigblock_offset, errorp, &buffer3);
        }
        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        if (used_hole == 0) {
                hammer_modify_volume(trans, root_volume, NULL, 0);
                rootmap->next_offset = next_offset + bytes;
                if (rootmap->alloc_offset < rootmap->next_offset) {
                        rootmap->alloc_offset =
                            (rootmap->next_offset + HAMMER_LARGEBLOCK_MASK) &
                            ~HAMMER_LARGEBLOCK_MASK64;
                }
                hammer_modify_volume_done(root_volume);
        }

done:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        lockmgr(&trans->hmp->blockmap_lock, LK_RELEASE);
        return(next_offset);
}
/*
 * Front-end blockmap reservation
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to issue disk write I/O
 * for large blocks of data without having to queue the BIOs to the back-end.
 * If the reservation winds up not being used, for example due to a crash,
 * the reblocker should eventually come along and clean it up.
 *
 * This code will attempt to assign free big-blocks to the blockmap to
 * accommodate the request.
 *
 * If we return 0 a reservation was not possible and the caller must queue
 * the I/O to the backend.
 */
hammer_off_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        int loops = 0;
        int skip_amount;
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);

        rootmap = &hmp->blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes > 0 && bytes <= HAMMER_BUFSIZE);

        lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
        /*
         * Starting zoneX offset.  The reservation code always wraps at the
         * alloc_offset (the allocation code is allowed to go through to the
         * zone limit).
         */
        next_offset = rootmap->next_offset;

again:
        if (next_offset >= rootmap->alloc_offset) {
                if (++loops == 2) {     /* XXX poor-man's */
                        next_offset = 0;
                        *errorp = ENOSPC;
                        goto done;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }
        /*
         * The allocation request may not cross a buffer boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                skip_amount = HAMMER_BUFSIZE -
                              ((int)next_offset & HAMMER_BUFMASK);
                hammer_add_hole(hmp, &hmp->holes[zone],
                                next_offset, skip_amount);
                next_offset = tmp_offset & ~HAMMER_BUFMASK64;
        }
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        KKASSERT(*errorp == 0);
        KKASSERT(next_offset <= rootmap->alloc_offset);

        /*
         * Check CRC if not allocating into uninitialized space
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER1");
        }
        KKASSERT(layer1->phys_offset);
        /*
         * If layer1 indicates no free blocks in layer2 and our alloc_offset
         * is not in layer2, skip layer2 entirely.
         */
        if (layer1->blocks_free == 0 &&
            ((next_offset ^ rootmap->alloc_offset) & ~HAMMER_BLOCKMAP_LAYER2_MASK) != 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2_MASK) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        KKASSERT(*errorp == 0);
        /*
         * Check CRC if not allocating into uninitialized space
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER2");
        }
        if ((next_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                /*
                 * We are at the beginning of a new bigblock
                 */
                if (layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
                        struct hammer_transaction trans;

                        hammer_start_transaction(&trans, hmp);
                        if (hammer_sync_lock_sh_try(&trans) == 0) {
                                hammer_blockmap_llalloc(&trans,
                                                        next_offset, errorp,
                                                        buffer1, layer1,
                                                        buffer2, layer2);
                                hammer_sync_unlock(&trans);
                        } else {
                                hammer_sync_lock_sh(&trans);
                                hammer_blockmap_llalloc(&trans,
                                                        next_offset, errorp,
                                                        buffer1, layer1,
                                                        buffer2, layer2);
                                hammer_sync_unlock(&trans);
                                /* *errorp = EDEADLK; */
                        }
                        hammer_done_transaction(&trans);
                        if (layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
                                /*
                                 * The big-block could not be assigned,
                                 * the reservation is not possible.
                                 */
                                next_offset = 0;
                                goto done;
                        }
                } else if (layer2->bytes_free != HAMMER_LARGEBLOCK_SIZE) {
                        /*
                         * We have encountered a block that is already
                         * partially allocated.  We must skip this block.
                         */
                        next_offset += HAMMER_LARGEBLOCK_SIZE;
                        goto again;
                }
        } else {
                /*
                 * We are appending within a bigblock.  It is possible that
                 * the blockmap has been marked completely free via a prior
                 * pruning operation.  We no longer reset the append index
                 * for that case because it compromises the UNDO by allowing
                 * new allocations to overwrite deleted data still subject
                 * to undo on reboot.
                 */
                KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
                KKASSERT(layer2->bytes_free >= HAMMER_LARGEBLOCK_SIZE -
                         (int)(next_offset & HAMMER_LARGEBLOCK_MASK64));
        }
        /*
         * The reservation code does not modify layer2->bytes_free, it
         * simply adjusts next_offset.
         */
        KKASSERT(layer2->bytes_free >= 0);
        /*
         * Reservations are used for direct I/O, make sure there is no
         * zone-2 bp cached in the device layer.
         */
        bigblock_offset = layer2->u.phys_offset +
                          (next_offset & HAMMER_LARGEBLOCK_MASK64);
        hammer_binval(hmp, bigblock_offset);

        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        rootmap->next_offset = next_offset + bytes;

done:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        lockmgr(&hmp->blockmap_lock, LK_RELEASE);
        return(next_offset);
}
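/*
 * Intended use, sketched from the comments above (not a verbatim caller
 * from the tree; the local names are illustrative):
 *
 *      zone_off = hammer_blockmap_reserve(hmp, zone, bytes, &error);
 *      if (zone_off == 0) {
 *              (reservation not possible - queue the BIO to the back-end)
 *      } else {
 *              (issue direct write I/O against the reserved space; if the
 *               space is never used the reblocker eventually reclaims it)
 *      }
 */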
/*
 * Free (offset,bytes) in a zone.
 *
 * If bytes is negative we are actually allocating previously reserved
 * space in the zone.
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
                     hammer_off_t bmap_off, int bytes)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;
        if (bytes >= 0) {
                bytes = (bytes + 7) & ~7;
                KKASSERT(bytes <= HAMMER_BUFSIZE);
                KKASSERT(((bmap_off ^ (bmap_off + (bytes - 1))) &
                          ~HAMMER_LARGEBLOCK_MASK64) == 0);
        } else {
                KKASSERT(bytes >= -HAMMER_BUFSIZE);
        }
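        /*
         * Illustrative example (hypothetical count): finalizing a
         * reservation made by hammer_blockmap_reserve() calls this function
         * with a negative count, e.g. bytes == -128, which skips the
         * rounding above and later drives layer2->bytes_free down instead
         * of up, converting the reserved space into a real allocation.  A
         * positive count is an ordinary free.
         */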
        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(trans->hmp, &error);
        if (error)
                return;

        lockmgr(&trans->hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
        rootmap = &trans->hmp->blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_free: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
        }
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(trans->hmp, layer1_offset, &error, &buffer1);
        KKASSERT(error == 0);
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER1");
        }
        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(trans->hmp, layer2_offset, &error, &buffer2);
        KKASSERT(error == 0);
        KKASSERT(layer2->u.phys_offset);
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER2");
        }
        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);
        /*
         * If the big-block is free, return it to the free pool.  The layer2
         * infrastructure is left intact even if the entire layer2 becomes
         * free.
         *
         * At the moment if our iterator is in a bigblock that becomes
         * wholely free, we have to leave the block allocated and we cannot
         * reset the iterator because there may be UNDOs on-disk that
         * reference areas of that block and we cannot overwrite those areas.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                if ((rootmap->next_offset ^ bmap_off) &
                    ~HAMMER_LARGEBLOCK_MASK64) {
                        /*
                         * Our iterator is not in the now-free big-block
                         * and we can release it.
                         */
                        hammer_clean_holes(trans->hmp,
                                           &trans->hmp->holes[zone],
                                           bmap_off);
                        hammer_freemap_free(trans, layer2->u.phys_offset,
                                            bmap_off, &error);
                        hammer_clrxlate_buffer(trans->hmp,
                                               layer2->u.phys_offset);
                        layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;

                        hammer_modify_buffer(trans, buffer1,
                                             layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
#if 0
                        /*
                         * This commented out code would release the layer2
                         * bigblock.  We do not want to do this, at least
                         * not right now.
                         *
                         * This also may be incomplete.
                         */
                        if (layer1->blocks_free == HAMMER_BLOCKMAP_RADIX2) {
                                hammer_freemap_free(
                                        trans, layer1->phys_offset,
                                        bmap_off & ~HAMMER_BLOCKMAP_LAYER2_MASK,
                                        &error);
                                layer1->phys_offset = HAMMER_BLOCKMAP_FREE;
                        }
#endif
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        hammer_modify_buffer_done(buffer1);
                } else {
#if 0
                        /*
                         * This commented out code would reset the iterator,
                         * which we cannot do at the moment as it could cause
                         * new allocations to overwrite deleted data still
                         * subject to undo on reboot.
                         */
                        hammer_modify_volume(trans, root_volume,
                                             NULL, 0);
                        rootmap->next_offset &= ~HAMMER_LARGEBLOCK_MASK64;
                        hammer_modify_volume_done(root_volume);
#endif
                }
        }
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        lockmgr(&trans->hmp->blockmap_lock, LK_RELEASE);

        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        hammer_rel_volume(root_volume, 0);
}
/*
 * Return the number of free bytes in the big-block containing the
 * specified blockmap offset.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
                        int *curp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int bytes;
        int zone;
        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);

        rootmap = &hmp->blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_getfree: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
        }
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER1");
        }
        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer2->u.phys_offset);
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER2");
        }

        bytes = layer2->bytes_free;

        if ((rootmap->next_offset ^ bmap_off) & ~HAMMER_LARGEBLOCK_MASK64)
                *curp = 0;
        else
                *curp = 1;
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
                        bmap_off, bytes);
        }
        return(bytes);
}
/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        int zone;
        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);

        rootmap = &hmp->blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
        }
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER1");
        }
        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer2->u.phys_offset);
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                Debugger("CRC FAILED: LAYER2");
        }
        result_offset = layer2->u.phys_offset +
                        (bmap_off & HAMMER_LARGEBLOCK_MASK64);
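        /*
         * For illustration (made-up offsets): if this big-block lives at
         * zone-2 offset P and bmap_off is 0x12345 bytes into the big-block
         * (bmap_off & HAMMER_LARGEBLOCK_MASK64 == 0x12345), the lookup
         * resolves to P + 0x12345: the big-block's physical location plus
         * the offset of the datum within it.
         */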
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        bmap_off, result_offset);
        }
        return(result_offset);
}
/************************************************************************
 *                  IN-CORE TRACKING OF ALLOCATION HOLES                *
 ************************************************************************
 *
 * This is a temporary shim in need of a more permanent solution.
 *
 * As we allocate space holes are created due to having to align to a new
 * 16K buffer when an allocation would otherwise cross the buffer boundary.
 * These holes are recorded here and used to fulfill smaller requests as
 * much as possible.  Only a limited number of holes are recorded and these
 * functions operate somewhat like a heuristic, where information is allowed
 * to be lost.
 */
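/*
 * The hole bookkeeping below assumes roughly the following structures
 * (a sketch inferred from usage; the real definitions live in the HAMMER
 * headers):
 *
 *      struct hammer_hole {
 *              TAILQ_ENTRY(hammer_hole) entry;         list linkage
 *              hammer_off_t            offset;         zone-N start of hole
 *              int                     bytes;          bytes remaining
 *      };
 *
 *      struct hammer_holes {
 *              TAILQ_HEAD(, hammer_hole) list;         FIFO of recorded holes
 *              int                     count;          bounded by HAMMER_MAX_HOLES
 *      };
 *
 * One hammer_holes structure is kept per zone (hmp->holes[zone]).
 */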
void
hammer_init_holes(hammer_mount_t hmp, hammer_holes_t holes)
{
        TAILQ_INIT(&holes->list);
        holes->count = 0;
}
void
hammer_free_holes(hammer_mount_t hmp, hammer_holes_t holes)
{
        struct hammer_hole *hole;

        while ((hole = TAILQ_FIRST(&holes->list)) != NULL) {
                TAILQ_REMOVE(&holes->list, hole, entry);
                kfree(hole, M_HAMMER);
        }
}
/*
 * Attempt to locate a hole with sufficient free space to accommodate the
 * requested allocation.  Return the offset or 0 if no hole could be found.
 */
static hammer_off_t
hammer_find_hole(hammer_mount_t hmp, hammer_holes_t holes, int bytes)
{
        struct hammer_hole *hole;
        hammer_off_t result_off = 0;

        TAILQ_FOREACH(hole, &holes->list, entry) {
                if (bytes <= hole->bytes) {
                        result_off = hole->offset;
                        hole->offset += bytes;
                        hole->bytes -= bytes;
                        break;
                }
        }
        return(result_off);
}
/*
 * If a newly created hole is reasonably sized then record it.  We only
 * keep track of a limited number of holes.  Lost holes are recovered by
 * reblocking.
 *
 * offset is a zone-N offset.
 */
static void
hammer_add_hole(hammer_mount_t hmp, hammer_holes_t holes,
                hammer_off_t offset, int bytes)
{
        struct hammer_hole *hole;

        if (holes->count < HAMMER_MAX_HOLES) {
                hole = kmalloc(sizeof(*hole), M_HAMMER, M_WAITOK);
                ++holes->count;
        } else {
                hole = TAILQ_FIRST(&holes->list);
                TAILQ_REMOVE(&holes->list, hole, entry);
        }
        TAILQ_INSERT_TAIL(&holes->list, hole, entry);
        hole->offset = offset;
        hole->bytes = bytes;
}
/*
 * Clean out any holes cached for the bigblock we are about to release back
 * to the free pool.
 */
static void
hammer_clean_holes(hammer_mount_t hmp, hammer_holes_t holes,
                   hammer_off_t offset)
{
        struct hammer_hole *hole;

        offset &= ~HAMMER_LARGEBLOCK_MASK64;

restart:
        TAILQ_FOREACH(hole, &holes->list, entry) {
                if ((hole->offset & ~HAMMER_LARGEBLOCK_MASK64) == offset) {
                        TAILQ_REMOVE(&holes->list, hole, entry);
                        --holes->count;
                        kfree(hole, M_HAMMER);
                        goto restart;
                }
        }
}