/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

#include <sys/tree.h>

/*
 * Each collect covers 1<<(19+23) bytes address space of layer 1.
 * (plus a copy of 1<<23 bytes that holds layer2 entries in layer 1).
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;	/* layer2 address pointed by layer1 */
	hammer_off_t	*offsets;	/* big-block offset for layer2[i] */
	hammer_blockmap_layer2_t track2; /* track of layer2 entries */
	hammer_blockmap_layer2_t layer2; /* 1<<19 x 16 bytes entries */
	int		error;		/* # of inconsistencies */
} *collect_t;

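/*
 * Red-black tree comparator: collect nodes are keyed on the layer2
 * physical offset recorded from the corresponding layer1 entry.
 */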
static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(hammer_blockmap_layer1_t layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static hammer_blockmap_layer2_t collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, zone_stat_t stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

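/*
 * Entry point for the blockmap command: dump the freemap zone of
 * the blockmap.
 */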
void
hammer_cmd_blockmap(void)
{
	dump_blockmap(HAMMER_ZONE_FREEMAP_INDEX);
}

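/*
 * Walk the two-level blockmap for the given zone: scan each layer1
 * entry, then each layer2 entry (one per big-block) underneath it,
 * CRC-checking and printing both levels as it goes.
 */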
static
void
dump_blockmap(int zone)
{
	volume_info_t root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	buffer_info_t buffer1 = NULL;
	buffer_info_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	zone_stat_t stats = NULL;
	int xerr, aerr, ferr;

	root_volume = get_root_volume();
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	print_blockmap(root_volume);

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
	     phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';	/* good */
		if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)phys_offset,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			hammer_off_t zone_offset = phys_offset + block_offset;
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = aerr = ferr = ' ';	/* good */
			if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
				aerr = 'A';
				++num_bad_layer2;
			}
			if (layer2->bytes_free < 0 ||
			    layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
				ferr = 'F';
				++num_bad_layer2;
			}

			if (VerboseOpt < 2 &&
			    xerr == ' ' && aerr == ' ' && ferr == ' ' &&
			    layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
				break;
			}
			printf("%c%c%c %016jx zone=%-2d ",
				xerr, aerr, ferr, (uintmax_t)zone_offset,
				layer2->zone);
			if (VerboseOpt) {
				printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
			}
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt) {
				double bytes_used = HAMMER_BIGBLOCK_SIZE -
					layer2->bytes_free;
				printf(" fill=%-5.1lf crc=%08x-%08x\n",
					bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
					layer1->layer1_crc,
					layer2->entry_crc);
			} else {
				printf("\n");
			}

			if (stats)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt)
		printf("%d bad layer1\n", num_bad_layer1);
	if (num_bad_layer2 || VerboseOpt)
		printf("%d bad layer2\n", num_bad_layer2);
}

void
hammer_cmd_checkmap(void)
{
	volume_info_t volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_root_volume();
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	print_blockmap(volume);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
}

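/*
 * Account for the big-blocks used by the freemap itself: the layer1
 * big-block plus one layer2 big-block for each in-use layer1 entry.
 */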
static
void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	buffer_info_t buffer1 = NULL;
	hammer_blockmap_layer1_t layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

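/*
 * Recursively walk the B-Tree from the given node, CRC-checking each
 * node and collecting the storage referenced by internal and leaf
 * elements.
 */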
static
void
check_btree_node(hammer_off_t node_offset, int depth)
{
	buffer_info_t buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';	/* good */
	char badm = ' ';	/* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_buffer_data(node_offset, &buffer, 0);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (!hammer_crc_test_btree(HammerVersion, node)) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {	/* not good */
		++num_bad_node;
		printf("%c%c NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
				node->count,
				(uintmax_t)node->parent,
				(node->type ? node->type : '?'),
				depth,
				(uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
					depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
	}
	rel_buffer(buffer);
}

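/*
 * Scan the UNDO FIFO from the start of its zone up to alloc_offset,
 * collecting each record.  A record with an illegal size field is
 * skipped by re-aligning the scan to the next UNDO boundary.
 */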
static
void
check_undo(hammer_blockmap_t undomap)
{
	buffer_info_t buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ENCODE_UNDO(0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		    head->hdr_size == 0 ||
		    head->hdr_size > HAMMER_UNDO_ALIGN -
		    ((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = HAMMER_UNDO_DOALIGN(scan_offset);
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);

	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(hammer_blockmap_layer1_t layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);

	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

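/*
 * Leaf data lands in different zones depending on the record type:
 * well-known metadata records go to the META zone, while DATA/DB
 * records are placed by size via hammer_data_zone_index().
 */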
static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		HAMMER_DATA_DOALIGN(elm->leaf.data_len), zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

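/*
 * Translate a zone-X offset to its raw (zone-2) equivalent and charge
 * the allocation against the tracking entry for its big-block.
 */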
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	hammer_blockmap_layer2_t track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup_save(offset, &layer1, &layer2, &error);
	if (DebugOpt) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(hammer_is_zone_raw_buffer(result_offset));
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset);	/* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

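/*
 * Look up the collect node for the given layer2 physical offset,
 * allocating and inserting a new one on first reference.
 */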
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = calloc(1, HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->layer2 = calloc(1, HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->offsets = calloc(HAMMER_BLOCKMAP_RADIX2, sizeof(hammer_off_t));
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);

	return(collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

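/*
 * Return the tracking layer2 entry for the big-block containing the
 * given raw offset, saving a copy of the on-disk layer2 entry and
 * initializing the tracking entry on first use.
 */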
static
hammer_blockmap_layer2_t
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2)
{
	hammer_blockmap_layer2_t track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return(track2);
}

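/*
 * Compare every tracked big-block against its saved on-disk layer2
 * entry, report inconsistencies, then tear down the collect tree.
 */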
static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	zone_stat_t stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt)
		printf("%d bad nodes\n", num_bad_node);
	if (error || VerboseOpt)
		printf("%d errors\n", error);
}

static
void
dump_collect(collect_t collect, zone_stat_t stats)
{
	hammer_blockmap_layer2_t track2;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t offset;
	int i;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		if (DebugOpt) {
			assert((layer2->zone == HAMMER_ZONE_UNDO_INDEX) ||
				(layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				hammer_is_index_record(layer2->zone));
		}
		if (stats)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(intmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}