/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */
#include "hammer.h"

/*
 * Each collect covers 1<<(19+23) bytes address space of layer 1.
 * (plus a copy of 1<<23 bytes that holds layer2 entries in layer 1).
 */
typedef struct collect {
        RB_ENTRY(collect) entry;
        hammer_off_t phys_offset;               /* layer2 address pointed by layer1 */
        hammer_off_t *offsets;                  /* big-block offset for layer2[i] */
        struct hammer_blockmap_layer2 *track2;  /* track of layer2 entries */
        struct hammer_blockmap_layer2 *layer2;  /* 1<<19 x 16 bytes entries */
        int error;                              /* # of inconsistencies */
} *collect_t;
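
/*
 * Collect nodes are kept in a red-black tree keyed by phys_offset
 * (the layer2 address taken from the layer1 entry), so each layer1
 * entry maps to at most one collect node.
 */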
static int
collect_compare(struct collect *c1, struct collect *c2)
{
        if (c1->phys_offset < c2->phys_offset)
                return(-1);
        if (c1->phys_offset > c2->phys_offset)
                return(1);
        return(0);
}
RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
        phys_offset);
static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
        hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
        collect_t collect, hammer_off_t offset, int zone,
        struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);
static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;
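
/*
 * Entry point for the hammer(8) "blockmap" directive: dump the
 * freemap zone of the root volume's blockmap.
 */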
void
hammer_cmd_blockmap(void)
{
        dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}
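
/*
 * Walk the zone's address space two levels deep: each layer1 entry
 * covers HAMMER_BLOCKMAP_LAYER2 bytes of address space and points at
 * an array of layer2 entries, one per big-block (1<<23 bytes).
 */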
static
void
dump_blockmap(const char *label, int zone)
{
        struct volume_info *root_volume;
        hammer_blockmap_t rootmap;
        hammer_blockmap_t blockmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        struct zone_stat *stats = NULL;
        int xerr, aerr, ferr;
        int i;

        root_volume = get_root_volume();
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        assert(rootmap->phys_offset != 0);

        printf("                   "
               "phys             first            next             alloc\n");
        for (i = 0; i < HAMMER_MAX_ZONES; i++) {
                blockmap = &root_volume->ondisk->vol0_blockmap[i];
                if (VerboseOpt || i == zone) {
                        printf("zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
                                i, (i == zone ? label : ""),
                                (uintmax_t)blockmap->phys_offset,
                                (uintmax_t)blockmap->first_offset,
                                (uintmax_t)blockmap->next_offset,
                                (uintmax_t)blockmap->alloc_offset);
                }
        }

        if (VerboseOpt)
                stats = hammer_init_zone_stat();

        for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
             phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                /*
                 * Dive layer 1.
                 */
                layer1_offset = rootmap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                xerr = ' ';     /* good */
                if (layer1->layer1_crc !=
                    crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                        xerr = 'B';
                        ++num_bad_layer1;
                }
                if (xerr == ' ' &&
                    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        continue;
                }
                printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
                        xerr,
                        (uintmax_t)phys_offset,
                        (uintmax_t)layer1->phys_offset,
                        (intmax_t)layer1->blocks_free);

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        hammer_off_t zone_offset = phys_offset + block_offset;
                        /*
                         * Dive layer 2, each entry represents a big-block.
                         */
                        layer2_offset = layer1->phys_offset +
                                        HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

                        xerr = aerr = ferr = ' ';       /* good */
                        if (layer2->entry_crc !=
                            crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                                xerr = 'B';
                                ++num_bad_layer2;
                        }
                        if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
                                aerr = 'A';
                                ++num_bad_layer2;
                        }
                        if (layer2->bytes_free < 0 ||
                            layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
                                ferr = 'F';
                                ++num_bad_layer2;
                        }

                        if (VerboseOpt < 2 &&
                            xerr == ' ' && aerr == ' ' && ferr == ' ' &&
                            layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                                break;
                        }
                        printf("%c%c%c %016jx zone=%-2d ",
                                xerr, aerr, ferr,
                                (uintmax_t)zone_offset, layer2->zone);
                        if (VerboseOpt) {
                                printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
                                        HAMMER_VOL_DECODE(zone_offset),
                                        HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
                                        HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
                                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
                                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
                        }
                        printf("app=%-7d free=%-7d",
                                layer2->append_off,
                                layer2->bytes_free);
                        if (VerboseOpt) {
                                double bytes_used = HAMMER_BIGBLOCK_SIZE -
                                        layer2->bytes_free;
                                printf(" fill=%-5.1lf crc=%08x-%08x\n",
                                        bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
                                        layer1->layer1_crc,
                                        layer2->entry_crc);
                        } else {
                                printf("\n");
                        }

                        if (stats)
                                hammer_add_zone_stat_layer2(stats, layer2);
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_volume);

        if (stats) {
                hammer_print_zone_stat(stats);
                hammer_cleanup_zone_stat(stats);
        }

        if (num_bad_layer1 || VerboseOpt) {
                printf("%d bad layer1\n", num_bad_layer1);
        }
        if (num_bad_layer2 || VerboseOpt) {
                printf("%d bad layer2\n", num_bad_layer2);
        }
}
void
hammer_cmd_checkmap(void)
{
        struct volume_info *volume;
        hammer_blockmap_t freemap;
        hammer_blockmap_t undomap;
        hammer_off_t node_offset;

        volume = get_root_volume();
        node_offset = volume->ondisk->vol0_btree_root;
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

        printf("Volume header\tnext_tid=%016jx\n",
                (uintmax_t)volume->ondisk->vol0_next_tid);
        printf("\t\tbufoffset=%016jx\n",
                (uintmax_t)volume->ondisk->vol_buf_beg);
        printf("\t\tundosize=%jdMB\n",
                (intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
                        / (1024 * 1024)));
        rel_volume(volume);

        AssertOnFailure = (DebugOpt != 0);

        printf("Collecting allocation info from freemap: ");
        fflush(stdout);
        check_freemap(freemap);
        printf("done\n");

        printf("Collecting allocation info from B-Tree: ");
        fflush(stdout);
        check_btree_node(node_offset, 0);
        printf("done\n");

        printf("Collecting allocation info from UNDO: ");
        fflush(stdout);
        check_undo(undomap);
        printf("done\n");

        dump_collect_table();
}
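
/*
 * Collect the big-blocks that back the freemap itself: the layer1
 * array plus every layer2 array a usable layer1 entry points at.
 */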
static void
check_freemap(hammer_blockmap_t freemap)
{
        hammer_off_t offset;
        struct buffer_info *buffer1 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        int i;

        collect_freemap_layer1(freemap);

        for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
                offset = freemap->phys_offset + i * sizeof(*layer1);
                layer1 = get_buffer_data(offset, &buffer1, 0);
                if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
                        collect_freemap_layer2(layer1);
        }
        rel_buffer(buffer1);
}
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
        struct buffer_info *buffer = NULL;
        hammer_node_ondisk_t node;
        hammer_btree_elm_t elm;
        int i;
        char badc = ' ';        /* good */
        char badm = ' ';        /* good */

        if (depth == 0)
                collect_btree_root(node_offset);
        node = get_node(node_offset, &buffer);

        if (node == NULL) {
                badc = 'B';
                badm = 'I';
        } else if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) != node->crc) {
                badc = 'B';
        }

        if (badm != ' ' || badc != ' ') {       /* not good */
                ++num_bad_node;
                printf("%c%c NODE %016jx ",
                        badc, badm, (uintmax_t)node_offset);
                if (node == NULL) {
                        printf("(IO ERROR)\n");
                        rel_buffer(buffer);
                        return;
                } else {
                        printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
                                node->count,
                                (uintmax_t)node->parent,
                                (node->type ? node->type : '?'),
                                depth,
                                (uintmax_t)node->mirror_tid);
                }
        }

        for (i = 0; i < node->count; ++i) {
                elm = &node->elms[i];

                switch (node->type) {
                case HAMMER_BTREE_TYPE_INTERNAL:
                        if (elm->internal.subtree_offset) {
                                collect_btree_internal(elm);
                                check_btree_node(elm->internal.subtree_offset,
                                        depth + 1);
                        }
                        break;
                case HAMMER_BTREE_TYPE_LEAF:
                        if (elm->leaf.data_offset)
                                collect_btree_leaf(elm);
                        break;
                default:
                        assert(!AssertOnFailure);
                        break;
                }
        }
        rel_buffer(buffer);
}
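
/*
 * Scan the UNDO FIFO from the start of the UNDO zone up to
 * alloc_offset, collecting every PAD/DUMMY/UNDO/REDO record and
 * resynchronizing to the next HAMMER_UNDO_ALIGN boundary when a
 * record advertises an illegal size.
 */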
static void
check_undo(hammer_blockmap_t undomap)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t scan_offset;
        hammer_fifo_head_t head;

        scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
        while (scan_offset < undomap->alloc_offset) {
                head = get_buffer_data(scan_offset, &buffer, 0);
                switch (head->hdr_type) {
                case HAMMER_HEAD_TYPE_PAD:
                case HAMMER_HEAD_TYPE_DUMMY:
                case HAMMER_HEAD_TYPE_UNDO:
                case HAMMER_HEAD_TYPE_REDO:
                        collect_undo(scan_offset, head);
                        break;
                default:
                        assert(!AssertOnFailure);
                        break;
                }
                if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
                    head->hdr_size == 0 ||
                    head->hdr_size > HAMMER_UNDO_ALIGN -
                        ((u_int)scan_offset & HAMMER_UNDO_MASK)) {
                        printf("Illegal size, skipping to next boundary\n");
                        scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
                                        ~HAMMER_UNDO_MASK64;
                } else {
                        scan_offset += head->hdr_size;
                }
        }
        rel_buffer(buffer);
}
static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
        /*
         * This translation is necessary to do checkmap properly
         * as zone4 is really just zone2 address space.
         */
        hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
                HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);

        collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
                HAMMER_ZONE_FREEMAP_INDEX);
}
static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
        /*
         * This translation is necessary to do checkmap properly
         * as zone4 is really just zone2 address space.
         */
        hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
                HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);

        collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
                HAMMER_ZONE_FREEMAP_INDEX);
}
static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
        collect_blockmap(node_offset,
                sizeof(struct hammer_node_ondisk),      /* 4KB */
                HAMMER_ZONE_BTREE_INDEX);
}
static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
        collect_blockmap(elm->internal.subtree_offset,
                sizeof(struct hammer_node_ondisk),      /* 4KB */
                HAMMER_ZONE_BTREE_INDEX);
}
static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
        int zone;

        switch (elm->base.rec_type) {
        case HAMMER_RECTYPE_INODE:
        case HAMMER_RECTYPE_DIRENTRY:
        case HAMMER_RECTYPE_EXT:
        case HAMMER_RECTYPE_FIX:
        case HAMMER_RECTYPE_PFS:
        case HAMMER_RECTYPE_SNAPSHOT:
        case HAMMER_RECTYPE_CONFIG:
                zone = HAMMER_ZONE_META_INDEX;
                break;
        case HAMMER_RECTYPE_DATA:
        case HAMMER_RECTYPE_DB:
                zone = hammer_data_zone_index(elm->leaf.data_len);
                break;
        default:
                zone = HAMMER_ZONE_UNAVAIL_INDEX;
                break;
        }
        collect_blockmap(elm->leaf.data_offset,
                (elm->leaf.data_len + 15) & ~15, zone);
}
static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
        collect_blockmap(scan_offset, head->hdr_size,
                HAMMER_ZONE_UNDO_INDEX);
}
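
/*
 * Record one allocation: translate the zone-X offset to its raw
 * (zone-2) buffer address via blockmap_lookup() and subtract the
 * allocation length from the tracked big-block's bytes_free.
 * dump_collect() later compares the result to the on-disk layer2.
 */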
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
        struct hammer_blockmap_layer1 layer1;
        struct hammer_blockmap_layer2 layer2;
        struct hammer_blockmap_layer2 *track2;
        hammer_off_t result_offset;
        collect_t collect;
        int error;

        result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
        if (AssertOnFailure) {
                assert(HAMMER_ZONE_DECODE(offset) == zone);
                assert(HAMMER_ZONE_DECODE(result_offset) ==
                        HAMMER_ZONE_RAW_BUFFER_INDEX);
                assert(error == 0);
        }
        collect = collect_get(layer1.phys_offset);      /* layer2 address */
        track2 = collect_get_track(collect, result_offset, zone, &layer2);
        track2->bytes_free -= length;
}
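
/*
 * Look up (or lazily create) the collect node for a layer2 address.
 * Each new node allocates two HAMMER_BIGBLOCK_SIZE arrays, so memory
 * use grows with the number of layer1 entries actually referenced.
 */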
static
collect_t
collect_get(hammer_off_t phys_offset)
{
        collect_t collect;

        collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
        if (collect)
                return(collect);

        collect = calloc(sizeof(*collect), 1);
        collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);        /* 1<<23 bytes */
        collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);        /* 1<<23 bytes */
        collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
        collect->phys_offset = phys_offset;
        RB_INSERT(collect_rb_tree, &CollectTree, collect);
        bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
        bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

        return(collect);
}
static
void
collect_rel(collect_t collect)
{
        free(collect->offsets);
        free(collect->layer2);
        free(collect->track2);
        free(collect);
}
static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
                struct hammer_blockmap_layer2 *layer2)
{
        struct hammer_blockmap_layer2 *track2;
        int i;

        i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
        track2 = &collect->track2[i];
        if (track2->entry_crc == 0) {
                collect->layer2[i] = *layer2;
                collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
                track2->zone = zone;
                track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                track2->entry_crc = 1;  /* steal field to tag track load */
        }
        return(track2);
}
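
/*
 * Walk the collect tree: report each tracked big-block, sum the
 * per-node error counts, then tear the tree down and free the nodes.
 */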
static
void
dump_collect_table(void)
{
        collect_t collect;
        int error = 0;
        struct zone_stat *stats = NULL;

        if (VerboseOpt)
                stats = hammer_init_zone_stat();

        RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
                dump_collect(collect, stats);
                error += collect->error;
        }

        while ((collect = RB_ROOT(&CollectTree)) != NULL) {
                RB_REMOVE(collect_rb_tree, &CollectTree, collect);
                collect_rel(collect);
        }
        assert(RB_EMPTY(&CollectTree));

        if (VerboseOpt) {
                hammer_print_zone_stat(stats);
                hammer_cleanup_zone_stat(stats);
        }

        if (num_bad_node || VerboseOpt) {
                printf("%d bad nodes\n", num_bad_node);
        }
        if (error || VerboseOpt) {
                printf("%d errors\n", error);
        }
}
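
/*
 * Compare each tracked big-block against its on-disk layer2 entry:
 * "BZ" flags a zone mismatch, "BM" a bytes_free mismatch; matching
 * entries are only printed when verbose.
 */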
static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
        struct hammer_blockmap_layer2 *track2;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t offset;
        int i, zone;

        for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
                track2 = &collect->track2[i];
                layer2 = &collect->layer2[i];
                offset = collect->offsets[i];

                /*
                 * Check big-blocks referenced by freemap, data,
                 * B-Tree nodes and UNDO fifo.
                 */
                if (track2->entry_crc == 0)
                        continue;
                zone = layer2->zone;

                if (AssertOnFailure) {
                        assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
                                (zone == HAMMER_ZONE_FREEMAP_INDEX) ||
                                hammer_is_zone2_mapped_index(zone));
                }
                if (stats)
                        hammer_add_zone_stat_layer2(stats, layer2);

                if (track2->zone != layer2->zone) {
                        printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
                                (uintmax_t)offset,
                                track2->zone,
                                layer2->zone);
                        collect->error++;
                } else if (track2->bytes_free != layer2->bytes_free) {
                        printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
                                (uintmax_t)offset,
                                layer2->zone,
                                track2->bytes_free,
                                layer2->bytes_free);
                        collect->error++;
                } else if (VerboseOpt) {
                        printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
                                (uintmax_t)offset,
                                layer2->zone,
                                track2->bytes_free);
                }
        }
}