sbin/hammer: Cleanup blocks with a single statement
[dragonfly.git] / sbin / hammer / cmd_blockmap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"
/*
 * Each collect covers 1<<(19+23) bytes of layer 1 address space
 * (plus a copy of 1<<23 bytes that holds the layer2 entries in layer 1).
 */
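/*
 * Worked example of the coverage math above (a sketch, assuming the
 * standard HAMMER constants): one layer2 entry is 16 bytes, so one
 * 8MB (1<<23) big-block holds 1<<19 layer2 entries, and each entry
 * describes one 1<<23 byte big-block.  A single collect therefore
 * spans (1<<19) * (1<<23) == 1<<42 bytes (4TB) of zone address space.
 */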
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;	/* layer2 address pointed by layer1 */
	hammer_off_t	*offsets;	/* big-block offset for layer2[i] */
	hammer_blockmap_layer2_t track2;	/* track of layer2 entries */
	hammer_blockmap_layer2_t layer2;	/* 1<<19 x 16 bytes entries */
	int error;			/* # of inconsistencies */
} *collect_t;
static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);
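/*
 * Note: the *2 variants of the RB macros additionally emit a direct
 * lookup-by-key function, keyed here on phys_offset, so collect_get()
 * below can use RB_LOOKUP without first constructing a dummy
 * struct collect to compare against.
 */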
static void dump_blockmap(int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(hammer_blockmap_layer1_t layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static hammer_blockmap_layer2_t collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;
void
hammer_cmd_blockmap(void)
{
	dump_blockmap(HAMMER_ZONE_FREEMAP_INDEX);
}
static
void
dump_blockmap(int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	struct zone_stat *stats = NULL;
	int xerr, aerr, ferr;

	root_volume = get_root_volume();
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	print_blockmap(root_volume);

	if (VerboseOpt)
		stats = hammer_init_zone_stat();
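	/*
	 * A zone offset keeps the 4-bit zone id in the top bits of the
	 * 64-bit offset, so (sketch, per the HAMMER on-disk offset layout)
	 * HAMMER_ZONE_ENCODE(4, 0) yields 0x4000000000000000.  The loop
	 * below walks that zone's entire address space in layer1-sized
	 * (1<<42 byte) strides, one layer1 entry per iteration.
	 */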
	for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
	     phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';	/* good */
		if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
			continue;
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)phys_offset,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);
		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			hammer_off_t zone_offset = phys_offset + block_offset;
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = aerr = ferr = ' ';	/* good */
			if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
				aerr = 'A';
				++num_bad_layer2;
			}
			if (layer2->bytes_free < 0 ||
			    layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
				ferr = 'F';
				++num_bad_layer2;
			}

			if (VerboseOpt < 2 &&
			    xerr == ' ' && aerr == ' ' && ferr == ' ' &&
			    layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX)
				break;
			printf("%c%c%c %016jx zone=%-2d ",
				xerr, aerr, ferr,
				(uintmax_t)zone_offset, layer2->zone);
			if (VerboseOpt)
				printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt) {
				double bytes_used = HAMMER_BIGBLOCK_SIZE -
					layer2->bytes_free;
				printf(" fill=%-5.1lf crc=%08x-%08x\n",
					bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
					layer1->layer1_crc,
					layer2->entry_crc);
			} else {
				printf("\n");
			}

			if (stats)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt)
		printf("%d bad layer1\n", num_bad_layer1);
	if (num_bad_layer2 || VerboseOpt)
		printf("%d bad layer2\n", num_bad_layer2);
}
void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_root_volume();
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	print_blockmap(volume);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
}
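/*
 * In outline, checkmap recomputes per-big-block usage from three
 * independent sources (freemap layers, B-Tree nodes and data, UNDO
 * fifo) via collect_blockmap(), then dump_collect_table() compares the
 * recomputed zone and bytes_free against the on-disk layer2 entries.
 */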
static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	hammer_blockmap_layer1_t layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}
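/*
 * Sizing note (a sketch, assuming the standard constants): layer1
 * entries are 32 bytes, so HAMMER_BLOCKMAP_RADIX1 is 1<<18 and the
 * loop above visits 256K entries.  UNAVAIL entries have no backing
 * layer2 big-block and contribute nothing to the collect table.
 */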
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';	/* good */
	char badm = ' ';	/* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_buffer_data(node_offset, &buffer, 0);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (!hammer_crc_test_btree(HammerVersion, node)) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {	/* not good */
		++num_bad_node;
		printf("%c%c   NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
				node->count,
				(uintmax_t)node->parent,
				(node->type ? node->type : '?'),
				depth,
				(uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
	}
	rel_buffer(buffer);
}
static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ENCODE_UNDO(0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		    head->hdr_size == 0 ||
		    head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = HAMMER_UNDO_DOALIGN(scan_offset);
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}
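/*
 * The size check above guards the scan itself: a FIFO record's
 * hdr_size must be HAMMER_HEAD_ALIGN aligned, non-zero, and must not
 * cross the next HAMMER_UNDO_ALIGN boundary.  When a header fails
 * this we cannot trust hdr_size to advance the cursor, so the scan
 * skips forward to the next aligned boundary instead.
 */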
static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}
static __inline
void
collect_freemap_layer2(hammer_blockmap_layer1_t layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}
static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}
static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}
static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		HAMMER_DATA_DOALIGN(elm->leaf.data_len), zone);
}
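/*
 * Zone selection mirrors the allocator: metadata record types live in
 * the META zone, while DATA/DB records go to whichever data zone
 * hammer_data_zone_index() picks from data_len (small-data vs
 * large-data).  HAMMER_DATA_DOALIGN rounds data_len up to the
 * allocator's granularity (16 bytes in the standard layout) so the
 * tracked usage matches what was actually reserved on disk.
 */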
static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	hammer_blockmap_layer2_t track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup_save(offset, &layer1, &layer2, &error);
	if (DebugOpt) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(hammer_is_zone_raw_buffer(result_offset));
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset);	/* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = calloc(1, HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->layer2 = calloc(1, HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->offsets = calloc(HAMMER_BLOCKMAP_RADIX2, sizeof(hammer_off_t));
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);

	return (collect);
}
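/*
 * Memory footprint sketch (assuming the standard constants): each
 * collect allocates two 8MB arrays (track2 and the layer2 copy) plus
 * (1<<19) * sizeof(hammer_off_t) == 4MB of offsets, roughly 20MB per
 * 4TB of referenced zone address space.  Sparse filesystems stay
 * cheap because a collect is only created on first reference.
 */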
static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}
static
hammer_blockmap_layer2_t
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2)
{
	hammer_blockmap_layer2_t track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}
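/*
 * The entry_crc field of a *track* entry never holds a real CRC; it
 * is repurposed as a load flag (0 == untouched, 1 == initialized),
 * which is also what lets dump_collect() skip big-blocks that were
 * never referenced.  The saved on-disk layer2 copy keeps its
 * genuine CRC.
 */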
static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt)
		printf("%d bad nodes\n", num_bad_node);
	if (error || VerboseOpt)
		printf("%d errors\n", error);
}
static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	hammer_blockmap_layer2_t track2;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t offset;
	int i;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		if (DebugOpt)
			assert((layer2->zone == HAMMER_ZONE_UNDO_INDEX) ||
				(layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				hammer_is_index_record(layer2->zone));
		if (stats)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(intmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}
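/*
 * Usage sketch (hedged): these code paths back the hammer(8)
 * subcommands, e.g.
 *
 *	hammer blockmap		# dump_blockmap() on the freemap zone
 *	hammer -v checkmap	# cross-check, with per-zone statistics
 *
 * assuming the standard command wiring in the hammer utility.
 */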