/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Per-object dictionary entry tracking the reconstructed directory
 * hierarchy.  Entries are chained in a hash table (see RDHash) and
 * linked to their parent directory as information is discovered.
 * Field list reconstructed from the uses visible in this file —
 * TODO confirm exact order/types against the original header.
 */
struct recover_dict {
	struct recover_dict *next;	/* hash chain */
	struct recover_dict *parent;	/* parent directory entry */
	int64_t obj_id;			/* inode object id */
	uint16_t pfs_id;		/* PFS the object belongs to */
	uint8_t obj_type;		/* HAMMER_OBJTYPE_*, 0 if unknown */
	uint8_t flags;			/* DICTF_* flags below */
	int64_t size;			/* inode size, -1 until inode found */
	char *name;			/* assigned filename, NULL if none */
};
48 #define DICTF_MADEDIR 0x01
49 #define DICTF_MADEFILE 0x02
50 #define DICTF_PARENT 0x04 /* parent attached for real */
51 #define DICTF_TRAVERSED 0x80
/* forward typedef so the prototypes below can use bigblock_t */
typedef struct bigblock *bigblock_t;
55 static void recover_top(char *ptr
, hammer_off_t offset
);
56 static void recover_elm(hammer_btree_leaf_elm_t leaf
);
57 static struct recover_dict
*get_dict(int64_t obj_id
, uint16_t pfs_id
);
58 static char *recover_path(struct recover_dict
*dict
);
59 static void sanitize_string(char *str
);
60 static hammer_off_t
scan_raw_limit(void);
61 static void scan_bigblocks(int target_zone
);
62 static void free_bigblocks(void);
63 static void add_bigblock_entry(hammer_off_t offset
,
64 hammer_blockmap_layer1_t layer1
, hammer_blockmap_layer2_t layer2
);
65 static bigblock_t
get_bigblock_entry(hammer_off_t offset
);
static const char *TargetDir;		/* recovery output directory (av[0]) */
static int CachedFd = -1;		/* cached open fd for data-record writes */
static char *CachedPath;		/* path corresponding to CachedFd */
71 typedef struct bigblock
{
72 RB_ENTRY(bigblock
) entry
;
73 hammer_off_t phys_offset
; /* zone-2 */
74 struct hammer_blockmap_layer1 layer1
;
75 struct hammer_blockmap_layer2 layer2
;
79 bigblock_cmp(bigblock_t b1
, bigblock_t b2
)
81 if (b1
->phys_offset
< b2
->phys_offset
)
83 if (b1
->phys_offset
> b2
->phys_offset
)
88 RB_HEAD(bigblock_rb_tree
, bigblock
) ZoneTree
= RB_INITIALIZER(&ZoneTree
);
89 RB_PROTOTYPE2(bigblock_rb_tree
, bigblock
, entry
, bigblock_cmp
, hammer_off_t
);
90 RB_GENERATE2(bigblock_rb_tree
, bigblock
, entry
, bigblock_cmp
, hammer_off_t
,
/*
 * There was a hidden bug here while iterating zone-2 offset as
 * shown in an example below.
 *
 * If a volume was once used as HAMMER filesystem which consists of
 * multiple volumes whose usage has reached beyond the first volume,
 * and then later re-formatted only using 1 volume, hammer recover is
 * likely to hit assertion in get_buffer() due to having access to
 * invalid volume (vol1,2,...) from old filesystem data.
 *
 * To avoid this, now the command only scans upto the last big-block
 * that's actually used for filesystem data or meta-data at the moment,
 * if all layer1/2 entries have correct CRC values. This also avoids
 * recovery of irrelevant files from old filesystem.
 *
 * It also doesn't scan beyond append offset of big-blocks in B-Tree
 * zone to avoid recovery of irrelevant files from old filesystem,
 * if layer1/2 entries for those big-blocks have correct CRC values.
 *
 * |-----vol0-----|-----vol1-----|-----vol2-----| old filesystem
 * <-----------------------> used by old filesystem
 *
 * |-----vol0-----| new filesystem
 * <-----> used by new filesystem
 *         <-------> unused, invalid data from old filesystem
 *       <-> B-Tree nodes likely to point to vol1
 */
122 hammer_cmd_recover(char **av
, int ac
)
124 struct buffer_info
*data_buffer
;
125 struct volume_info
*volume
;
128 hammer_off_t off_end
;
129 hammer_off_t off_blk
;
130 hammer_off_t raw_limit
= 0;
131 hammer_off_t zone_limit
= 0;
134 int target_zone
= HAMMER_ZONE_BTREE_INDEX
;
139 errx(1, "hammer recover <target_dir> [full|quick]");
143 if (!strcmp(av
[1], "full"))
145 if (!strcmp(av
[1], "quick"))
148 assert(!full
|| !quick
);
150 if (mkdir(TargetDir
, 0777) == -1) {
155 printf("Running %sraw scan of HAMMER image, recovering to %s\n",
156 full
? "full " : quick
? "quick " : "",
160 scan_bigblocks(target_zone
);
161 raw_limit
= scan_raw_limit();
163 raw_limit
+= HAMMER_BIGBLOCK_SIZE
;
164 assert(hammer_is_zone_raw_buffer(raw_limit
));
170 if (!RB_EMPTY(&ZoneTree
)) {
171 printf("Found zone-%d big-blocks at\n", target_zone
);
172 RB_FOREACH(b
, bigblock_rb_tree
, &ZoneTree
)
173 printf("%016jx\n", b
->phys_offset
);
175 b
= RB_MAX(bigblock_rb_tree
, &ZoneTree
);
176 zone_limit
= b
->phys_offset
+ HAMMER_BIGBLOCK_SIZE
;
177 assert(hammer_is_zone_raw_buffer(zone_limit
));
181 if (raw_limit
|| zone_limit
) {
182 #define _fmt "Scanning zone-%d big-blocks till %016jx"
183 if (!raw_limit
) /* unlikely */
184 printf(_fmt
" ???", target_zone
, zone_limit
);
185 else if (!zone_limit
)
186 printf(_fmt
, HAMMER_ZONE_RAW_BUFFER_INDEX
, raw_limit
);
187 else if (raw_limit
>= zone_limit
)
188 printf(_fmt
, target_zone
, zone_limit
);
190 printf(_fmt
" ???", HAMMER_ZONE_RAW_BUFFER_INDEX
, raw_limit
);
195 for (i
= 0; i
< HAMMER_MAX_VOLUMES
; i
++) {
196 volume
= get_volume(i
);
200 printf("Scanning volume %d size %s\n",
201 volume
->vol_no
, sizetostr(volume
->size
));
202 off
= HAMMER_ENCODE_RAW_BUFFER(volume
->vol_no
, 0);
203 off_end
= off
+ HAMMER_VOL_BUF_SIZE(volume
->ondisk
);
205 while (off
< off_end
) {
206 off_blk
= off
& HAMMER_BIGBLOCK_MASK64
;
208 b
= get_bigblock_entry(off
);
211 if (off
>= raw_limit
) {
212 printf("Done %016jx\n", (uintmax_t)off
);
217 if (off
>= zone_limit
) {
218 printf("Done %016jx\n", (uintmax_t)off
);
222 off
= HAMMER_ZONE_LAYER2_NEXT_OFFSET(off
);
228 if (hammer_crc_test_layer1(&b
->layer1
) &&
229 hammer_crc_test_layer2(&b
->layer2
) &&
230 off_blk
>= b
->layer2
.append_off
) {
231 off
= HAMMER_ZONE_LAYER2_NEXT_OFFSET(off
);
236 ptr
= get_buffer_data(off
, &data_buffer
, 0);
238 recover_top(ptr
, off
);
239 off
+= HAMMER_BUFSIZE
;
243 rel_buffer(data_buffer
);
256 print_node(hammer_node_ondisk_t node
, hammer_off_t offset
)
258 char buf
[HAMMER_BTREE_LEAF_ELMS
+ 1];
259 int maxcount
= hammer_node_max_elements(node
->type
);
262 for (i
= 0; i
< node
->count
&& i
< maxcount
; ++i
)
263 buf
[i
] = hammer_elm_btype(&node
->elms
[i
]);
266 printf("%016jx %c %d %s\n", offset
, node
->type
, node
->count
, buf
);
270 * Top level recovery processor. Assume the data is a B-Tree node.
271 * If the CRC is good we attempt to process the node, building the
272 * object space and creating the dictionary as we go.
275 recover_top(char *ptr
, hammer_off_t offset
)
277 hammer_node_ondisk_t node
;
278 hammer_btree_elm_t elm
;
283 for (node
= (void *)ptr
; (char *)node
< ptr
+ HAMMER_BUFSIZE
; ++node
) {
284 isnode
= hammer_crc_test_btree(node
);
285 maxcount
= hammer_node_max_elements(node
->type
);
289 print_node(node
, offset
);
290 else if (DebugOpt
> 1)
291 printf("%016jx -\n", offset
);
293 offset
+= sizeof(*node
);
295 if (isnode
&& node
->type
== HAMMER_BTREE_TYPE_LEAF
) {
296 for (i
= 0; i
< node
->count
&& i
< maxcount
; ++i
) {
297 elm
= &node
->elms
[i
];
298 if (elm
->base
.btype
== HAMMER_BTREE_TYPE_RECORD
)
299 recover_elm(&elm
->leaf
);
306 recover_elm(hammer_btree_leaf_elm_t leaf
)
308 struct buffer_info
*data_buffer
= NULL
;
309 struct recover_dict
*dict
;
310 struct recover_dict
*dict2
;
311 hammer_data_ondisk_t ondisk
;
312 hammer_off_t data_offset
;
326 * Ignore deleted records
332 * If we're running full scan, it's possible that data_offset
333 * refers to old filesystem data that we can't physically access.
335 data_offset
= leaf
->data_offset
;
336 if (get_volume(HAMMER_VOL_DECODE(data_offset
)) == NULL
)
339 if (data_offset
!= 0)
340 ondisk
= get_buffer_data(data_offset
, &data_buffer
, 0);
346 len
= leaf
->data_len
;
347 chunk
= HAMMER_BUFSIZE
- ((int)data_offset
& HAMMER_BUFMASK
);
351 if (len
< 0 || len
> HAMMER_XBUFSIZE
|| len
> chunk
)
354 pfs_id
= lo_to_pfs(leaf
->base
.localization
);
357 * Note that meaning of leaf->base.obj_id differs depending
358 * on record type. For a direntry, leaf->base.obj_id points
359 * to its parent inode that this entry is a part of, but not
360 * its corresponding inode.
362 dict
= get_dict(leaf
->base
.obj_id
, pfs_id
);
364 switch(leaf
->base
.rec_type
) {
365 case HAMMER_RECTYPE_INODE
:
367 * We found an inode which also tells us where the file
368 * or directory is in the directory hierarchy.
371 printf("inode %016jx:%05d found\n",
372 (uintmax_t)leaf
->base
.obj_id
, pfs_id
);
374 path1
= recover_path(dict
);
377 * Attach the inode to its parent. This isn't strictly
378 * necessary because the information is also in the
379 * directory entries, but if we do not find the directory
380 * entry this ensures that the files will still be
381 * reasonably well organized in their proper directories.
383 if ((dict
->flags
& DICTF_PARENT
) == 0 &&
384 dict
->obj_id
!= HAMMER_OBJID_ROOT
&&
385 ondisk
->inode
.parent_obj_id
!= 0) {
386 dict
->flags
|= DICTF_PARENT
;
387 dict
->parent
= get_dict(ondisk
->inode
.parent_obj_id
,
390 (dict
->parent
->flags
& DICTF_MADEDIR
) == 0) {
391 dict
->parent
->flags
|= DICTF_MADEDIR
;
392 path2
= recover_path(dict
->parent
);
393 printf("mkdir %s\n", path2
);
399 if (dict
->obj_type
== 0)
400 dict
->obj_type
= ondisk
->inode
.obj_type
;
401 dict
->size
= ondisk
->inode
.size
;
402 path2
= recover_path(dict
);
404 if (lstat(path1
, &st
) == 0) {
405 if (ondisk
->inode
.obj_type
== HAMMER_OBJTYPE_REGFILE
) {
406 truncate(path1
, dict
->size
);
407 /* chmod(path1, 0666); */
409 if (strcmp(path1
, path2
)) {
410 printf("Rename (inode) %s -> %s\n", path1
, path2
);
411 rename(path1
, path2
);
413 } else if (ondisk
->inode
.obj_type
== HAMMER_OBJTYPE_REGFILE
) {
414 printf("mkinode (file) %s\n", path2
);
415 fd
= open(path2
, O_RDWR
|O_CREAT
, 0666);
418 } else if (ondisk
->inode
.obj_type
== HAMMER_OBJTYPE_DIRECTORY
) {
419 printf("mkinode (dir) %s\n", path2
);
421 dict
->flags
|= DICTF_MADEDIR
;
426 case HAMMER_RECTYPE_DATA
:
430 if (leaf
->base
.obj_id
== 0)
433 printf("inode %016jx:%05d data %016jx,%d\n",
434 (uintmax_t)leaf
->base
.obj_id
,
436 (uintmax_t)leaf
->base
.key
- len
,
441 * Update the dictionary entry
443 if (dict
->obj_type
== 0)
444 dict
->obj_type
= HAMMER_OBJTYPE_REGFILE
;
447 * If the parent directory has not been created we
448 * have to create it (typically a PFS%05d)
451 (dict
->parent
->flags
& DICTF_MADEDIR
) == 0) {
452 dict
->parent
->flags
|= DICTF_MADEDIR
;
453 path2
= recover_path(dict
->parent
);
454 printf("mkdir %s\n", path2
);
461 * Create the file if necessary, report file creations
463 path1
= recover_path(dict
);
464 if (CachedPath
&& strcmp(CachedPath
, path1
) == 0) {
467 fd
= open(path1
, O_CREAT
|O_RDWR
, 0666);
470 printf("Unable to create %s: %s\n",
471 path1
, strerror(errno
));
475 if ((dict
->flags
& DICTF_MADEFILE
) == 0) {
476 dict
->flags
|= DICTF_MADEFILE
;
477 printf("mkfile %s\n", path1
);
481 * And write the record. A HAMMER data block is aligned
482 * and may contain trailing zeros after the file EOF. The
483 * inode record is required to get the actual file size.
485 * However, when the inode record is not available
486 * we can do a sparse write and that will get it right
487 * most of the time even if the inode record is never
490 file_offset
= (int64_t)leaf
->base
.key
- len
;
491 lseek(fd
, (off_t
)file_offset
, SEEK_SET
);
493 if (dict
->size
== -1) {
494 for (zfill
= chunk
- 1; zfill
>= 0; --zfill
) {
495 if (((char *)ondisk
)[zfill
])
504 write(fd
, ondisk
, zfill
);
506 lseek(fd
, chunk
- zfill
, SEEK_CUR
);
509 data_offset
+= chunk
;
510 file_offset
+= chunk
;
511 ondisk
= get_buffer_data(data_offset
, &data_buffer
, 0);
514 chunk
= HAMMER_BUFSIZE
-
515 ((int)data_offset
& HAMMER_BUFMASK
);
519 if (dict
->size
>= 0 && file_offset
> dict
->size
) {
520 ftruncate(fd
, dict
->size
);
521 /* fchmod(fd, 0666); */
524 if (fd
== CachedFd
) {
526 } else if (CachedPath
) {
536 case HAMMER_RECTYPE_DIRENTRY
:
537 nlen
= len
- HAMMER_ENTRY_NAME_OFF
;
538 if ((int)nlen
< 0) /* illegal length */
540 if (ondisk
->entry
.obj_id
== 0 ||
541 ondisk
->entry
.obj_id
== HAMMER_OBJID_ROOT
)
543 name
= malloc(nlen
+ 1);
544 bcopy(ondisk
->entry
.name
, name
, nlen
);
546 sanitize_string(name
);
549 printf("dir %016jx:%05d entry %016jx \"%s\"\n",
550 (uintmax_t)leaf
->base
.obj_id
,
552 (uintmax_t)ondisk
->entry
.obj_id
,
557 * We can't deal with hardlinks so if the object already
558 * has a name assigned to it we just keep using that name.
560 dict2
= get_dict(ondisk
->entry
.obj_id
, pfs_id
);
561 path1
= recover_path(dict2
);
563 if (dict2
->name
== NULL
)
569 * Attach dict2 to its directory (dict), create the
570 * directory (dict) if necessary. We must ensure
571 * that the directory entry exists in order to be
572 * able to properly rename() the file without creating
573 * a namespace conflict.
575 if ((dict2
->flags
& DICTF_PARENT
) == 0) {
576 dict2
->flags
|= DICTF_PARENT
;
577 dict2
->parent
= dict
;
578 if ((dict
->flags
& DICTF_MADEDIR
) == 0) {
579 dict
->flags
|= DICTF_MADEDIR
;
580 path2
= recover_path(dict
);
581 printf("mkdir %s\n", path2
);
587 path2
= recover_path(dict2
);
588 if (strcmp(path1
, path2
) != 0 && lstat(path1
, &st
) == 0) {
589 printf("Rename (entry) %s -> %s\n", path1
, path2
);
590 rename(path1
, path2
);
597 * Ignore any other record types
602 rel_buffer(data_buffer
);
#define RD_HSIZE	32768
#define RD_HMASK	(RD_HSIZE - 1)

/* hash table of recover_dict entries, keyed by crc32(obj_id), chained via ->next */
struct recover_dict *RDHash[RD_HSIZE];
611 struct recover_dict
*
612 get_dict(int64_t obj_id
, uint16_t pfs_id
)
614 struct recover_dict
*dict
;
620 i
= crc32(&obj_id
, sizeof(obj_id
)) & RD_HMASK
;
621 for (dict
= RDHash
[i
]; dict
; dict
= dict
->next
) {
622 if (dict
->obj_id
== obj_id
&&
623 dict
->pfs_id
== pfs_id
) {
628 dict
= malloc(sizeof(*dict
));
629 bzero(dict
, sizeof(*dict
));
630 dict
->obj_id
= obj_id
;
631 dict
->pfs_id
= pfs_id
;
632 dict
->next
= RDHash
[i
];
637 * Always connect dangling dictionary entries to object 1
638 * (the root of the PFS).
640 * DICTF_PARENT will not be set until we know what the
641 * real parent directory object is.
643 if (dict
->obj_id
!= HAMMER_OBJID_ROOT
)
644 dict
->parent
= get_dict(HAMMER_OBJID_ROOT
, pfs_id
);
/*
 * Two-pass path builder state used by recover_path() — fields
 * reconstructed from recover_path()/recover_path_helper() usage.
 */
struct path_info {
	enum { PI_FIGURE, PI_LOAD } state;	/* measure, then fill */
	uint16_t pfs_id;
	char *base;	/* allocated path buffer (PI_LOAD) */
	char *next;	/* fill cursor (PI_LOAD) */
	int len;	/* accumulated length (PI_FIGURE) */
};
static void recover_path_helper(struct recover_dict *, struct path_info *);
661 recover_path(struct recover_dict
*dict
)
663 struct path_info info
;
665 /* Find info.len first */
666 bzero(&info
, sizeof(info
));
667 info
.state
= PI_FIGURE
;
668 recover_path_helper(dict
, &info
);
670 /* Fill in the path */
671 info
.pfs_id
= dict
->pfs_id
;
672 info
.base
= malloc(info
.len
);
673 info
.next
= info
.base
;
674 info
.state
= PI_LOAD
;
675 recover_path_helper(dict
, &info
);
677 /* Return the path */
681 #define STRLEN_OBJID 22 /* "obj_0x%016jx" */
682 #define STRLEN_PFSID 8 /* "PFS%05d" */
686 recover_path_helper(struct recover_dict
*dict
, struct path_info
*info
)
689 * Calculate path element length
691 dict
->flags
|= DICTF_TRAVERSED
;
693 switch(info
->state
) {
695 if (dict
->obj_id
== HAMMER_OBJID_ROOT
)
696 info
->len
+= STRLEN_PFSID
;
698 info
->len
+= strlen(dict
->name
);
700 info
->len
+= STRLEN_OBJID
;
704 (dict
->parent
->flags
& DICTF_TRAVERSED
) == 0) {
705 recover_path_helper(dict
->parent
, info
);
707 info
->len
+= strlen(TargetDir
) + 1;
712 (dict
->parent
->flags
& DICTF_TRAVERSED
) == 0) {
713 recover_path_helper(dict
->parent
, info
);
715 strcpy(info
->next
, TargetDir
);
716 info
->next
+= strlen(info
->next
);
720 if (dict
->obj_id
== HAMMER_OBJID_ROOT
) {
721 snprintf(info
->next
, STRLEN_PFSID
+ 1,
722 "PFS%05d", info
->pfs_id
);
723 } else if (dict
->name
) {
724 strcpy(info
->next
, dict
->name
);
726 snprintf(info
->next
, STRLEN_OBJID
+ 1,
727 "obj_0x%016jx", (uintmax_t)dict
->obj_id
);
729 info
->next
+= strlen(info
->next
);
732 dict
->flags
&= ~DICTF_TRAVERSED
;
/*
 * Replace every non-printable character in str with 'x' so recovered
 * directory-entry names are safe to use as filenames.
 */
static void
sanitize_string(char *str)
{
	while (*str) {
		if (!isprint(*str))
			*str = 'x';
		++str;
	}
}
750 struct volume_info
*vol
;
751 hammer_blockmap_t rootmap
;
752 hammer_blockmap_layer1_t layer1
;
753 hammer_blockmap_layer2_t layer2
;
754 struct buffer_info
*buffer1
= NULL
;
755 struct buffer_info
*buffer2
= NULL
;
756 hammer_off_t layer1_offset
;
757 hammer_off_t layer2_offset
;
758 hammer_off_t phys_offset
;
759 hammer_off_t block_offset
;
760 hammer_off_t offset
= 0;
761 int zone
= HAMMER_ZONE_FREEMAP_INDEX
;
763 vol
= get_root_volume();
764 rootmap
= &vol
->ondisk
->vol0_blockmap
[zone
];
765 assert(rootmap
->phys_offset
!= 0);
767 for (phys_offset
= HAMMER_ZONE_ENCODE(zone
, 0);
768 phys_offset
< HAMMER_ZONE_ENCODE(zone
, HAMMER_OFF_LONG_MASK
);
769 phys_offset
+= HAMMER_BLOCKMAP_LAYER2
) {
773 layer1_offset
= rootmap
->phys_offset
+
774 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset
);
775 layer1
= get_buffer_data(layer1_offset
, &buffer1
, 0);
777 if (!hammer_crc_test_layer1(layer1
)) {
778 offset
= 0; /* failed */
781 if (layer1
->phys_offset
== HAMMER_BLOCKMAP_UNAVAIL
)
784 for (block_offset
= 0;
785 block_offset
< HAMMER_BLOCKMAP_LAYER2
;
786 block_offset
+= HAMMER_BIGBLOCK_SIZE
) {
788 * Dive layer 2, each entry represents a big-block.
790 layer2_offset
= layer1
->phys_offset
+
791 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset
);
792 layer2
= get_buffer_data(layer2_offset
, &buffer2
, 0);
794 if (!hammer_crc_test_layer2(layer2
)) {
795 offset
= 0; /* failed */
798 if (layer2
->zone
== HAMMER_ZONE_UNAVAIL_INDEX
) {
800 } else if (layer2
->zone
&& layer2
->zone
!= zone
) {
801 offset
= phys_offset
+ block_offset
;
809 return(hammer_xlate_to_zone2(offset
));
814 scan_bigblocks(int target_zone
)
816 struct volume_info
*vol
;
817 hammer_blockmap_t rootmap
;
818 hammer_blockmap_layer1_t layer1
;
819 hammer_blockmap_layer2_t layer2
;
820 struct buffer_info
*buffer1
= NULL
;
821 struct buffer_info
*buffer2
= NULL
;
822 hammer_off_t layer1_offset
;
823 hammer_off_t layer2_offset
;
824 hammer_off_t phys_offset
;
825 hammer_off_t block_offset
;
826 hammer_off_t offset
= 0;
827 int zone
= HAMMER_ZONE_FREEMAP_INDEX
;
829 vol
= get_root_volume();
830 rootmap
= &vol
->ondisk
->vol0_blockmap
[zone
];
831 assert(rootmap
->phys_offset
!= 0);
833 for (phys_offset
= HAMMER_ZONE_ENCODE(zone
, 0);
834 phys_offset
< HAMMER_ZONE_ENCODE(zone
, HAMMER_OFF_LONG_MASK
);
835 phys_offset
+= HAMMER_BLOCKMAP_LAYER2
) {
839 layer1_offset
= rootmap
->phys_offset
+
840 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset
);
841 layer1
= get_buffer_data(layer1_offset
, &buffer1
, 0);
844 if (!hammer_crc_test_layer1(layer1)) {
847 if (layer1
->phys_offset
== HAMMER_BLOCKMAP_UNAVAIL
)
850 for (block_offset
= 0;
851 block_offset
< HAMMER_BLOCKMAP_LAYER2
;
852 block_offset
+= HAMMER_BIGBLOCK_SIZE
) {
853 offset
= phys_offset
+ block_offset
;
855 * Dive layer 2, each entry represents a big-block.
857 layer2_offset
= layer1
->phys_offset
+
858 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset
);
859 layer2
= get_buffer_data(layer2_offset
, &buffer2
, 0);
862 if (!hammer_crc_test_layer2(layer2)) {
865 if (layer2
->zone
== target_zone
) {
866 add_bigblock_entry(offset
, layer1
, layer2
);
867 } else if (layer2
->zone
== HAMMER_ZONE_UNAVAIL_INDEX
) {
882 while ((b
= RB_ROOT(&ZoneTree
)) != NULL
) {
883 RB_REMOVE(bigblock_rb_tree
, &ZoneTree
, b
);
886 assert(RB_EMPTY(&ZoneTree
));
891 add_bigblock_entry(hammer_off_t offset
,
892 hammer_blockmap_layer1_t layer1
, hammer_blockmap_layer2_t layer2
)
896 b
= calloc(1, sizeof(*b
));
897 b
->phys_offset
= hammer_xlate_to_zone2(offset
);
898 assert((b
->phys_offset
& HAMMER_BIGBLOCK_MASK64
) == 0);
899 bcopy(layer1
, &b
->layer1
, sizeof(*layer1
));
900 bcopy(layer2
, &b
->layer2
, sizeof(*layer2
));
902 RB_INSERT(bigblock_rb_tree
, &ZoneTree
, b
);
907 get_bigblock_entry(hammer_off_t offset
)
911 offset
= hammer_xlate_to_zone2(offset
);
912 offset
&= ~HAMMER_BIGBLOCK_MASK64
;
914 b
= RB_LOOKUP(bigblock_rb_tree
, &ZoneTree
, offset
);