/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect covers 1<<(19+23) bytes of layer 1 address space
 * (plus a copy of the 1<<23 bytes of layer2 entries held in layer 1).
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;	/* layer2 address pointed to by layer1 */
	hammer_off_t	*offsets;	/* big-block offset for layer2[i] */
	struct hammer_blockmap_layer2 *track2;	/* track of layer2 entries */
	struct hammer_blockmap_layer2 *layer2;	/* 1<<19 x 16 byte entries */
	int		error;		/* # of inconsistencies */
} *collect_t;

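/*
 * collect structures are kept in a red-black tree keyed on phys_offset
 * (the layer2 directory's physical address), so each big-block's tracked
 * usage can be found again cheaply as the freemap, B-Tree and UNDO scans
 * feed allocations in.
 */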
static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

void
hammer_cmd_blockmap(void)
{
	dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}

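/*
 * Dump the blockmap for the requested zone: print the per-zone summary
 * table from the volume header, then walk every layer1 entry belonging
 * to the zone and every layer2 entry under it, CRC-checking both layers
 * as it goes.  Entries with bad CRCs are flagged with 'B' and counted.
 */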
static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_t blockmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t scan1;
	hammer_off_t scan2;
	struct zone_stat *stats = NULL;
	int xerr;
	int i;

	assert(RootVolNo >= 0);
	root_volume = get_volume(RootVolNo);
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("                   "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &root_volume->ondisk->vol0_blockmap[i];
		if (VerboseOpt || i == zone) {
			printf("zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
				i, (i == zone ? label : ""),
				(uintmax_t)blockmap->phys_offset,
				(uintmax_t)blockmap->first_offset,
				(uintmax_t)blockmap->next_offset,
				(uintmax_t)blockmap->alloc_offset);
		}
	}

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

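	/*
	 * Scan the zone's address space one layer1 entry at a time.
	 * Per the comment atop this file, each layer1 entry maps
	 * 1<<(19+23) bytes (HAMMER_BLOCKMAP_LAYER2): 1<<19 layer2
	 * entries of one 8MB (1<<23) big-block each.  Hence the outer
	 * loop strides by HAMMER_BLOCKMAP_LAYER2 and the inner loop
	 * by HAMMER_BIGBLOCK_SIZE.
	 */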
	for (scan1 = HAMMER_ZONE_ENCODE(zone, 0);
	     scan1 < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     scan1 += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';	/* good */
		if (layer1->layer1_crc !=
		    crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)scan1,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (scan2 = scan1;
		     scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
		     scan2 += HAMMER_BIGBLOCK_SIZE) {
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = ' ';	/* good */
			if (layer2->entry_crc !=
			    crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			printf("%c       %016jx zone=%-2d ",
				xerr,
				(uintmax_t)scan2,
				layer2->zone);
			if (VerboseOpt > 1)
				printf("vol=%-3d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(scan2),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2));
			else if (VerboseOpt > 0)
				printf("vol=%-3d L1=%-6lu L2=%-6lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_INDEX(scan2),
					HAMMER_BLOCKMAP_LAYER2_INDEX(scan2));
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt)
				printf(" crc=%04x-%04x\n",
					layer1->layer1_crc,
					layer2->entry_crc);
			else
				printf("\n");

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_volume);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt) {
		printf("%d bad layer1\n", num_bad_layer1);
	}
	if (num_bad_layer2 || VerboseOpt) {
		printf("%d bad layer2\n", num_bad_layer2);
	}
}

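/*
 * Cross-check the freemap against actual usage: collect allocation
 * info from the freemap itself, the B-Tree and the UNDO FIFO, then
 * compare the tracked per-big-block zone and bytes_free against the
 * on-disk layer2 entries.
 */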
void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_volume(RootVolNo);
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (QuietOpt < 3) {
		printf("Volume header\trecords=%jd next_tid=%016jx\n",
		       (intmax_t)volume->ondisk->vol0_stat_records,
		       (uintmax_t)volume->ondisk->vol0_next_tid);
		printf("\t\tbufoffset=%016jx\n",
		       (uintmax_t)volume->ondisk->vol_buf_beg);
		printf("\t\tundosize=%jdMB\n",
		       (intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
			/ (1024 * 1024)));
	}
	rel_volume(volume);

	assert(HAMMER_ZONE_UNDO_INDEX < HAMMER_ZONE2_MAPPED_INDEX);
	assert(HAMMER_ZONE2_MAPPED_INDEX < HAMMER_MAX_ZONES);
	AssertOnFailure = (DebugOpt != 0);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
	AssertOnFailure = 1;
}

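/*
 * Walk all layer1 freemap entries, feeding the big-block that holds
 * the layer1 array and each in-use layer2 directory into the collect
 * table, so the freemap's own overhead is accounted for.
 */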
static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

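/*
 * Recursively descend the B-Tree from the given node, CRC-checking
 * each node and feeding the storage referenced by every internal and
 * leaf element into the collect table.  I/O errors ('I') and CRC
 * failures ('B') are flagged and counted in num_bad_node.
 */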
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';	/* good */
	char badm = ' ';	/* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) != node->crc) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {	/* not good */
		++num_bad_node;
		printf("%c%c   NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
				node->count,
				(uintmax_t)node->parent,
				(node->type ? node->type : '?'),
				depth,
				(uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
	}
	rel_buffer(buffer);
}

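/*
 * Walk the UNDO FIFO from offset 0 up to alloc_offset, collecting the
 * space consumed by each valid record.  A record with an illegal
 * hdr_size causes the scan to skip ahead to the next HAMMER_UNDO_ALIGN
 * boundary.
 */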
static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);

	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);

	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		/*
		 * There is an exceptional case where HAMMER uses
		 * HAMMER_ZONE_LARGE_DATA when the data length is
		 * >HAMMER_BUFSIZE/2 (not >=HAMMER_BUFSIZE).  That
		 * exception is currently used only by the mirror
		 * write code; the following code can ignore it and
		 * simply select a zone the normal way, using
		 * >=HAMMER_BUFSIZE.  See hammer_alloc_data() for
		 * details.
		 */
		zone = elm->leaf.data_len >= HAMMER_BUFSIZE ?
		       HAMMER_ZONE_LARGE_DATA_INDEX :
		       HAMMER_ZONE_SMALL_DATA_INDEX;
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

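/*
 * Record an allocation of 'length' bytes at the given zone-X offset:
 * translate it to a zone-2 (raw buffer) address via blockmap_lookup(),
 * then subtract the length from the tracked bytes_free of the owning
 * big-block.
 */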
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (AssertOnFailure) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset); /* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

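/*
 * Look up the collect structure for a layer2 directory, allocating
 * and inserting a zeroed one on first reference.  Each structure
 * shadows one 8MB (1<<23 byte) layer2 big-block of 1<<19 entries.
 */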
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

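/*
 * Return the tracking layer2 entry for the big-block containing
 * 'offset', initializing it from the on-disk layer2 entry on first
 * use.  entry_crc doubles as a loaded flag (0 = not yet loaded).
 */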
static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
		  struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

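/*
 * Report the collected results for every tracked layer2 directory,
 * accumulate the per-collect error counts, then tear down the tree.
 */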
static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt) {
		printf("%d bad nodes\n", num_bad_node);
	}
	if (error || VerboseOpt) {
		printf("%d errors\n", error);
	}
}

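/*
 * Compare each tracked big-block against its on-disk layer2 entry,
 * reporting zone mismatches as 'BZ' and bytes_free mismatches as 'BM'.
 */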
static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t offset;
	size_t i;
	int zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Only check big-blocks actually referenced by the
		 * freemap, data, B-Tree nodes or the UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (AssertOnFailure) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
				(zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				(zone >= HAMMER_ZONE2_MAPPED_INDEX &&
				 zone < HAMMER_MAX_ZONES));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(uintmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}