/*
 * Copyright (C) 2013 FUJITSU LIMITED.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#define _XOPEN_SOURCE 500
#define _GNU_SOURCE

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include <pthread.h>

#include "kerncompat.h"
#include "list.h"
#include "radix-tree.h"
#include "ctree.h"
#include "extent-cache.h"
#include "disk-io.h"
#include "volumes.h"
#include "transaction.h"
#include "crc32c.h"
#include "utils.h"
#include "version.h"
#include "btrfsck.h"
#include "commands.h"

#define BTRFS_NUM_MIRRORS	2
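
/*
 * Central bookkeeping for one recovery run: the superblock geometry we
 * trust (sector/leaf size, generations), the caches of chunk, block
 * group and device extent records rebuilt by the device scan, and the
 * good/bad/unrepaired chunk lists the later passes sort records into.
 * rc_lock serializes cache inserts from the per-device scan threads.
 */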
struct recover_control {
	int verbose;
	int yes;

	u16 csum_size;
	u32 sectorsize;
	u32 leafsize;
	u64 generation;
	u64 chunk_root_generation;

	struct btrfs_fs_devices *fs_devices;

	struct cache_tree chunk;
	struct block_group_tree bg;
	struct device_extent_tree devext;
	struct cache_tree eb_cache;

	struct list_head good_chunks;
	struct list_head bad_chunks;
	struct list_head unrepaired_chunks;
	pthread_mutex_t rc_lock;
};
struct extent_record {
	struct cache_extent cache;
	u64 generation;
	u8 csum[BTRFS_CSUM_SIZE];
	struct btrfs_device *devices[BTRFS_NUM_MIRRORS];
	u64 offsets[BTRFS_NUM_MIRRORS];
	int nmirrors;
};

struct device_scan {
	struct recover_control *rc;
	struct btrfs_device *dev;
	int fd;
};
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
{
	struct extent_record *rec;

	rec = malloc(sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Failed to allocate memory for extent record.\n");
		exit(1);
	}

	memset(rec, 0, sizeof(*rec));
	rec->cache.start = btrfs_header_bytenr(eb);
	rec->cache.size = eb->len;
	rec->generation = btrfs_header_generation(eb);
	read_extent_buffer(eb, rec->csum, (unsigned long)btrfs_header_csum(eb),
			   BTRFS_CSUM_SIZE);
	return rec;
}
static int process_extent_buffer(struct cache_tree *eb_cache,
				 struct extent_buffer *eb,
				 struct btrfs_device *device, u64 offset)
{
	struct extent_record *rec;
	struct extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_extent_record(eb);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(eb_cache,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct extent_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			if (exist->cache.start != rec->cache.start ||
			    exist->cache.size != rec->cache.size ||
			    memcmp(exist->csum, rec->csum, BTRFS_CSUM_SIZE)) {
				ret = -EEXIST;
			} else {
				BUG_ON(exist->nmirrors >= BTRFS_NUM_MIRRORS);
				exist->devices[exist->nmirrors] = device;
				exist->offsets[exist->nmirrors] = offset;
				exist->nmirrors++;
			}
			goto free_out;
		}
		remove_cache_extent(eb_cache, cache);
		free(exist);
		goto again;
	}

	rec->devices[0] = device;
	rec->offsets[0] = offset;
	rec->nmirrors++;
	ret = insert_cache_extent(eb_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void free_extent_record(struct cache_extent *cache)
{
	struct extent_record *er;

	er = container_of(cache, struct extent_record, cache);
	free(er);
}

FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record);
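
/*
 * Serialize an in-memory chunk_record into the on-disk struct
 * btrfs_chunk layout (header plus one struct btrfs_stripe per stripe),
 * ready to be inserted into the chunk tree or the superblock's sys
 * chunk array.  The caller owns and must free the returned buffer.
 */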
static struct btrfs_chunk *create_chunk_item(struct chunk_record *record)
{
	struct btrfs_chunk *ret;
	struct btrfs_stripe *chunk_stripe;
	int i;

	if (!record || record->num_stripes == 0)
		return NULL;
	ret = malloc(btrfs_chunk_item_size(record->num_stripes));
	if (!ret)
		return NULL;
	btrfs_set_stack_chunk_length(ret, record->length);
	btrfs_set_stack_chunk_owner(ret, record->owner);
	btrfs_set_stack_chunk_stripe_len(ret, record->stripe_len);
	btrfs_set_stack_chunk_type(ret, record->type_flags);
	btrfs_set_stack_chunk_io_align(ret, record->io_align);
	btrfs_set_stack_chunk_io_width(ret, record->io_width);
	btrfs_set_stack_chunk_sector_size(ret, record->sector_size);
	btrfs_set_stack_chunk_num_stripes(ret, record->num_stripes);
	btrfs_set_stack_chunk_sub_stripes(ret, record->sub_stripes);
	for (i = 0, chunk_stripe = &ret->stripe; i < record->num_stripes;
	     i++, chunk_stripe++) {
		btrfs_set_stack_stripe_devid(chunk_stripe,
				record->stripes[i].devid);
		btrfs_set_stack_stripe_offset(chunk_stripe,
				record->stripes[i].offset);
		memcpy(chunk_stripe->dev_uuid, record->stripes[i].dev_uuid,
		       BTRFS_UUID_SIZE);
	}
	return ret;
}
static void init_recover_control(struct recover_control *rc, int verbose,
				 int yes)
{
	memset(rc, 0, sizeof(struct recover_control));
	cache_tree_init(&rc->chunk);
	cache_tree_init(&rc->eb_cache);
	block_group_tree_init(&rc->bg);
	device_extent_tree_init(&rc->devext);

	INIT_LIST_HEAD(&rc->good_chunks);
	INIT_LIST_HEAD(&rc->bad_chunks);
	INIT_LIST_HEAD(&rc->unrepaired_chunks);

	rc->verbose = verbose;
	rc->yes = yes;
	pthread_mutex_init(&rc->rc_lock, NULL);
}

static void free_recover_control(struct recover_control *rc)
{
	free_block_group_tree(&rc->bg);
	free_chunk_cache_tree(&rc->chunk);
	free_device_extent_tree(&rc->devext);
	free_extent_record_tree(&rc->eb_cache);
	pthread_mutex_destroy(&rc->rc_lock);
}
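
/*
 * The three process_*_item() helpers below share one pattern: build a
 * record from the leaf item, then keep only the copy with the highest
 * generation.  An equal-generation duplicate must be byte-identical
 * (otherwise -EEXIST); an older overlapping record is dropped and the
 * lookup restarted, since one new record may overlap several old ones.
 */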
static int process_block_group_item(struct block_group_tree *bg_cache,
				    struct extent_buffer *leaf,
				    struct btrfs_key *key, int slot)
{
	struct block_group_record *rec;
	struct block_group_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_block_group_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(&bg_cache->tree,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct block_group_record, cache);

		/* Check the generation and replace if needed. */
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct block_group_record,
					      generation);
			/*
			 * According to the current kernel code, the following
			 * case is impossible, or there is something wrong in
			 * the kernel code.
			 */
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&bg_cache->tree, cache);
		list_del_init(&exist->list);
		free(exist);
		/*
		 * We must do the search again to avoid the following cache:
		 * /--old bg 1--//--old bg 2--/
		 *        /--new bg--/
		 */
		goto again;
	}

	ret = insert_block_group_record(bg_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_chunk_item(struct cache_tree *chunk_cache,
			      struct extent_buffer *leaf, struct btrfs_key *key,
			      int slot)
{
	struct chunk_record *rec;
	struct chunk_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_chunk_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(chunk_cache, rec->offset, rec->length);
	if (cache) {
		exist = container_of(cache, struct chunk_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int num_stripes = rec->num_stripes;
			int rec_size = btrfs_chunk_record_size(num_stripes);
			int offset = offsetof(struct chunk_record, generation);

			if (exist->num_stripes != rec->num_stripes ||
			    memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   rec_size - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(chunk_cache, cache);
		free(exist);
		goto again;
	}
	ret = insert_cache_extent(chunk_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_device_extent_item(struct device_extent_tree *devext_cache,
				      struct extent_buffer *leaf,
				      struct btrfs_key *key, int slot)
{
	struct device_extent_record *rec;
	struct device_extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_device_extent_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent2(&devext_cache->tree,
				     rec->cache.objectid,
				     rec->cache.start,
				     rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct device_extent_record, cache);
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct device_extent_record,
					      generation);
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&devext_cache->tree, cache);
		list_del_init(&exist->chunk_list);
		list_del_init(&exist->device_list);
		free(exist);
		goto again;
	}

	ret = insert_device_extent_record(devext_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void print_block_group_info(struct block_group_record *rec, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Block Group: start = %llu, len = %llu, flag = %llx\n",
	       rec->objectid, rec->offset, rec->flags);
}

static void print_block_group_tree(struct block_group_tree *tree)
{
	struct cache_extent *cache;
	struct block_group_record *rec;

	printf("All Block Groups:\n");
	for (cache = first_cache_extent(&tree->tree); cache;
	     cache = next_cache_extent(cache)) {
		rec = container_of(cache, struct block_group_record, cache);
		print_block_group_info(rec, "\t");
	}
	printf("\n");
}

static void print_stripe_info(struct stripe *data, char *prefix1, char *prefix2,
			      int index)
{
	if (prefix1)
		printf("%s", prefix1);
	if (prefix2)
		printf("%s", prefix2);
	printf("[%2d] Stripe: devid = %llu, offset = %llu\n",
	       index, data->devid, data->offset);
}

static void print_chunk_self_info(struct chunk_record *rec, char *prefix)
{
	int i;

	if (prefix)
		printf("%s", prefix);
	printf("Chunk: start = %llu, len = %llu, type = %llx, num_stripes = %u\n",
	       rec->offset, rec->length, rec->type_flags, rec->num_stripes);
	if (prefix)
		printf("%s", prefix);
	printf("    Stripes list:\n");
	for (i = 0; i < rec->num_stripes; i++)
		print_stripe_info(&rec->stripes[i], prefix, "    ", i);
}

static void print_chunk_tree(struct cache_tree *tree)
{
	struct cache_extent *n;
	struct chunk_record *entry;

	printf("All Chunks:\n");
	for (n = first_cache_extent(tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct chunk_record, cache);
		print_chunk_self_info(entry, "\t");
	}
	printf("\n");
}

static void print_device_extent_info(struct device_extent_record *rec,
				     char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device extent: devid = %llu, start = %llu, len = %llu, chunk offset = %llu\n",
	       rec->objectid, rec->offset, rec->length, rec->chunk_offset);
}

static void print_device_extent_tree(struct device_extent_tree *tree)
{
	struct cache_extent *n;
	struct device_extent_record *entry;

	printf("All Device Extents:\n");
	for (n = first_cache_extent(&tree->tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct device_extent_record, cache);
		print_device_extent_info(entry, "\t");
	}
	printf("\n");
}

static void print_device_info(struct btrfs_device *device, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device: id = %llu, name = %s\n",
	       device->devid, device->name);
}

static void print_all_devices(struct list_head *devices)
{
	struct btrfs_device *dev;

	printf("All Devices:\n");
	list_for_each_entry(dev, devices, dev_list)
		print_device_info(dev, "\t");
	printf("\n");
}
static void print_scan_result(struct recover_control *rc)
{
	if (!rc->verbose)
		return;

	printf("DEVICE SCAN RESULT:\n");
	printf("Filesystem Information:\n");
	printf("\tsectorsize: %d\n", rc->sectorsize);
	printf("\tleafsize: %d\n", rc->leafsize);
	printf("\ttree root generation: %llu\n", rc->generation);
	printf("\tchunk root generation: %llu\n", rc->chunk_root_generation);
	printf("\n");

	print_all_devices(&rc->fs_devices->devices);
	print_block_group_tree(&rc->bg);
	print_chunk_tree(&rc->chunk);
	print_device_extent_tree(&rc->devext);
}

static void print_chunk_info(struct chunk_record *chunk, char *prefix)
{
	struct device_extent_record *devext;
	int i;

	print_chunk_self_info(chunk, prefix);
	if (prefix)
		printf("%s", prefix);
	if (chunk->bg_rec)
		print_block_group_info(chunk->bg_rec, "    ");
	else
		printf("    No block group.\n");
	if (prefix)
		printf("%s", prefix);
	if (list_empty(&chunk->dextents)) {
		printf("    No device extent.\n");
	} else {
		printf("    Device extent list:\n");
		i = 0;
		list_for_each_entry(devext, &chunk->dextents, chunk_list) {
			if (prefix)
				printf("%s", prefix);
			printf("%s[%2d]", "        ", i);
			print_device_extent_info(devext, NULL);
			i++;
		}
	}
}

static void print_check_result(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct device_extent_record *devext;
	int total = 0;
	int good = 0;
	int bad = 0;

	if (!rc->verbose)
		return;

	printf("CHECK RESULT:\n");
	printf("Healthy Chunks:\n");
	list_for_each_entry(chunk, &rc->good_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	printf("Bad Chunks:\n");
	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		print_chunk_info(chunk, "  ");
		bad++;
		total++;
	}
	printf("\n");
	printf("Total Chunks:\t%d\n", total);
	printf("  Healthy:\t%d\n", good);
	printf("  Bad:\t%d\n", bad);

	printf("\n");
	printf("Orphan Block Groups:\n");
	list_for_each_entry(bg, &rc->bg.block_groups, list)
		print_block_group_info(bg, "  ");

	printf("\n");
	printf("Orphan Device Extents:\n");
	list_for_each_entry(devext, &rc->devext.no_chunk_orphans, chunk_list)
		print_device_extent_info(devext, "  ");
}
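
/*
 * Cross-check one recovered chunk against the (still readable) device
 * and extent trees: every stripe must have a matching DEV_EXTENT item
 * pointing back at the chunk, and the chunk must have a BLOCK_GROUP
 * item with the same type flags.  With bg_only set, only the block
 * group item is checked.  Returns -ENOENT when the metadata disagrees.
 */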
static int check_chunk_by_metadata(struct recover_control *rc,
				   struct btrfs_root *root,
				   struct chunk_record *chunk, int bg_only)
{
	int ret;
	int i;
	int slot;
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *dev_root;
	struct stripe *stripe;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_block_group_item *bg_ptr;
	struct extent_buffer *l;

	btrfs_init_path(&path);

	if (bg_only)
		goto bg_check;

	dev_root = root->fs_info->dev_root;
	for (i = 0; i < chunk->num_stripes; i++) {
		stripe = &chunk->stripes[i];

		key.objectid = stripe->devid;
		key.offset = stripe->offset;
		key.type = BTRFS_DEV_EXTENT_KEY;

		ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
		if (ret < 0) {
			fprintf(stderr, "Search device extent failed(%d)\n",
				ret);
			btrfs_release_path(&path);
			return ret;
		} else if (ret > 0) {
			if (rc->verbose)
				fprintf(stderr,
					"No device extent[%llu, %llu]\n",
					stripe->devid, stripe->offset);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		l = path.nodes[0];
		slot = path.slots[0];
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		if (chunk->offset !=
		    btrfs_dev_extent_chunk_offset(l, dev_extent)) {
			if (rc->verbose)
				fprintf(stderr,
					"Device tree mismatch with chunks: dev_extent[%llu, %llu], chunk[%llu, %llu]\n",
					btrfs_dev_extent_chunk_offset(l,
								dev_extent),
					btrfs_dev_extent_length(l, dev_extent),
					chunk->offset, chunk->length);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		btrfs_release_path(&path);
	}

bg_check:
	key.objectid = chunk->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk->length;

	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
				0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search block group failed(%d)\n", ret);
		btrfs_release_path(&path);
		return ret;
	} else if (ret > 0) {
		if (rc->verbose)
			fprintf(stderr, "No block group[%llu, %llu]\n",
				key.objectid, key.offset);
		btrfs_release_path(&path);
		return -ENOENT;
	}

	l = path.nodes[0];
	slot = path.slots[0];
	bg_ptr = btrfs_item_ptr(l, slot, struct btrfs_block_group_item);
	if (chunk->type_flags != btrfs_disk_block_group_flags(l, bg_ptr)) {
		if (rc->verbose)
			fprintf(stderr,
				"Chunk[%llu, %llu]'s type(%llu) is different from Block Group's type(%llu)\n",
				chunk->offset, chunk->length, chunk->type_flags,
				btrfs_disk_block_group_flags(l, bg_ptr));
		btrfs_release_path(&path);
		return -ENOENT;
	}
	btrfs_release_path(&path);
	return 0;
}
static int check_all_chunks_by_metadata(struct recover_control *rc,
					struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	LIST_HEAD(orphan_chunks);
	int ret = 0;
	int err;

	list_for_each_entry_safe(chunk, next, &rc->good_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 0);
		if (err) {
			if (err == -ENOENT)
				list_move_tail(&chunk->list, &orphan_chunks);
			else if (err && !ret)
				ret = err;
		}
	}

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err != -ENOENT && !ret)
			ret = err ? err : -EINVAL;
	}
	list_splice(&orphan_chunks, &rc->bad_chunks);
	return ret;
}
static int extract_metadata_record(struct recover_control *rc,
				   struct extent_buffer *leaf)
{
	struct btrfs_key key;
	int ret = 0;
	int i;
	u32 nritems;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_block_group_item(&rc->bg, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_CHUNK_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_chunk_item(&rc->chunk, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_DEV_EXTENT_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_device_extent_item(&rc->devext, leaf,
							 &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		}
		if (ret)
			break;
	}
	return ret;
}

static inline int is_super_block_address(u64 offset)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		if (offset == btrfs_sb_offset(i))
			return 1;
	}
	return 0;
}
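
/*
 * Per-device scan thread: probe the whole device at sectorsize
 * granularity (skipping superblock mirrors, and jumping a whole leaf
 * past each hit), treat anything with a matching fsid and a valid
 * checksum as a tree block, record it in eb_cache, and harvest
 * chunk/dev-extent/block-group items from leaves that are not newer
 * than the generations taken from the superblock.
 */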
static int scan_one_device(void *dev_scan_struct)
{
	struct extent_buffer *buf;
	u64 bytenr;
	int ret = 0;
	struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
	struct recover_control *rc = dev_scan->rc;
	struct btrfs_device *device = dev_scan->dev;
	int fd = dev_scan->fd;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	if (ret)
		return 1;

	buf = malloc(sizeof(*buf) + rc->leafsize);
	if (!buf)
		return -ENOMEM;
	buf->len = rc->leafsize;

	bytenr = 0;
	while (1) {
		if (is_super_block_address(bytenr))
			bytenr += rc->sectorsize;

		if (pread64(fd, buf->data, rc->leafsize, bytenr) <
		    rc->leafsize)
			break;

		if (memcmp_extent_buffer(buf, rc->fs_devices->fsid,
					 btrfs_header_fsid(),
					 BTRFS_FSID_SIZE)) {
			bytenr += rc->sectorsize;
			continue;
		}

		if (verify_tree_block_csum_silent(buf, rc->csum_size)) {
			bytenr += rc->sectorsize;
			continue;
		}

		pthread_mutex_lock(&rc->rc_lock);
		ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
		pthread_mutex_unlock(&rc->rc_lock);
		if (ret)
			goto out;

		if (btrfs_header_level(buf) != 0)
			goto next_node;

		switch (btrfs_header_owner(buf)) {
		case BTRFS_EXTENT_TREE_OBJECTID:
		case BTRFS_DEV_TREE_OBJECTID:
			/* different trees use different generations */
			if (btrfs_header_generation(buf) > rc->generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		case BTRFS_CHUNK_TREE_OBJECTID:
			if (btrfs_header_generation(buf) >
			    rc->chunk_root_generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		}
next_node:
		bytenr += rc->leafsize;
	}
out:
	close(fd);
	free(buf);
	return ret;
}
static int scan_devices(struct recover_control *rc)
{
	int ret = 0;
	int fd;
	struct btrfs_device *dev;
	struct device_scan *dev_scans;
	pthread_t *t_scans;
	int *t_rets;
	int devnr = 0;
	int devidx = 0;
	int cancel_from = 0;
	int cancel_to = 0;
	int i;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
		devnr++;
	dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
						 * devnr);
	if (!dev_scans)
		return -ENOMEM;
	t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
	if (!t_scans) {
		free(dev_scans);
		return -ENOMEM;
	}
	t_rets = (int *)malloc(sizeof(int) * devnr);
	if (!t_rets) {
		free(dev_scans);
		free(t_scans);
		return -ENOMEM;
	}

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		fd = open(dev->name, O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "Failed to open device %s\n",
				dev->name);
			ret = 1;
			cancel_from = 0;
			cancel_to = devidx - 1;
			goto out;
		}
		dev_scans[devidx].rc = rc;
		dev_scans[devidx].dev = dev;
		dev_scans[devidx].fd = fd;
		ret = pthread_create(&t_scans[devidx], NULL,
				     (void *)scan_one_device,
				     (void *)&dev_scans[devidx]);
		if (ret) {
			cancel_from = 0;
			cancel_to = devidx - 1;
			goto out;
		}
		devidx++;
	}

	i = 0;
	while (i < devidx) {
		ret = pthread_join(t_scans[i], (void **)&t_rets[i]);
		if (ret || t_rets[i]) {
			ret = 1;
			cancel_from = i + 1;
			cancel_to = devnr - 1;
			break;
		}
		i++;
	}
out:
	while (cancel_from <= cancel_to) {
		pthread_cancel(t_scans[cancel_from]);
		cancel_from++;
	}
	free(dev_scans);
	free(t_scans);
	free(t_rets);
	return !!ret;
}
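
/*
 * Seed the logical->physical mapping tree from a recovered chunk record
 * so that tree blocks inside that chunk can be read back before any
 * chunk tree exists on disk; open_ctree_with_broken_chunk() relies on
 * this before setting up the remaining roots.
 */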
static int build_device_map_by_chunk_record(struct btrfs_root *root,
					    struct chunk_record *chunk)
{
	int ret = 0;
	int i;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u16 num_stripes;
	struct btrfs_mapping_tree *map_tree;
	struct map_lookup *map;
	struct stripe *stripe;

	map_tree = &root->fs_info->mapping_tree;
	num_stripes = chunk->num_stripes;
	map = malloc(btrfs_map_lookup_size(num_stripes));
	if (!map)
		return -ENOMEM;
	map->ce.start = chunk->offset;
	map->ce.size = chunk->length;
	map->num_stripes = num_stripes;
	map->io_width = chunk->io_width;
	map->io_align = chunk->io_align;
	map->sector_size = chunk->sector_size;
	map->stripe_len = chunk->stripe_len;
	map->type = chunk->type_flags;
	map->sub_stripes = chunk->sub_stripes;

	for (i = 0, stripe = chunk->stripes; i < num_stripes; i++, stripe++) {
		devid = stripe->devid;
		memcpy(uuid, stripe->dev_uuid, BTRFS_UUID_SIZE);
		map->stripes[i].physical = stripe->offset;
		map->stripes[i].dev = btrfs_find_device(root, devid,
							uuid, NULL);
		if (!map->stripes[i].dev) {
			kfree(map);
			return -EIO;
		}
	}

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	return ret;
}

static int build_device_maps_by_chunk_records(struct recover_control *rc,
					      struct btrfs_root *root)
{
	int ret = 0;
	struct chunk_record *chunk;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	return ret;
}
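
/*
 * Walk the extent tree over one block group's range [objectid,
 * objectid + offset) and delete every extent item in it, keeping the
 * superblock's bytes_used counter in sync.  Used on SYSTEM chunks whose
 * contents (the old chunk tree) are about to be rewritten anyway.
 */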
static int block_group_remove_all_extent_items(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct block_group_record *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	u64 start = bg->objectid;
	u64 end = bg->objectid + bg->offset;
	u64 old_val;
	int nitems;
	int ret;
	int i;
	int del_s, del_nr;

	btrfs_init_path(&path);
	root = root->fs_info->extent_root;

	key.objectid = start;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
	if (ret < 0)
		goto err;
	else if (ret > 0)
		ret = 0;

	leaf = path.nodes[0];
	nitems = btrfs_header_nritems(leaf);
	if (!nitems) {
		/* The tree is empty. */
		ret = 0;
		goto err;
	}

	if (path.slots[0] >= nitems) {
		ret = btrfs_next_leaf(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0) {
			ret = 0;
			goto err;
		}
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, 0);
		if (key.objectid >= end)
			goto err;
		btrfs_release_path(&path);
		goto again;
	}

	del_nr = 0;
	del_s = -1;
	for (i = path.slots[0]; i < nitems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid >= end)
			break;

		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (del_nr == 0)
				continue;
			else
				break;
		}

		if (del_s == -1)
			del_s = i;
		del_nr++;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			old_val = btrfs_super_bytes_used(fs_info->super_copy);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				old_val += root->leafsize;
			else
				old_val += key.offset;
			btrfs_set_super_bytes_used(fs_info->super_copy,
						   old_val);
		}
	}

	if (del_nr) {
		ret = btrfs_del_items(trans, root, &path, del_s, del_nr);
		if (ret)
			goto err;
	}

	if (key.objectid < end) {
		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			key.objectid += root->sectorsize;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
		}
		btrfs_release_path(&path);
		goto again;
	}
err:
	btrfs_release_path(&path);
	return ret;
}
static int block_group_free_all_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct block_group_record *bg)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info;
	u64 start;
	u64 end;

	info = root->fs_info;
	cache = btrfs_lookup_block_group(info, bg->objectid);
	if (!cache)
		return -ENOENT;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	set_extent_bits(&info->block_group_cache, start, end,
			BLOCK_GROUP_DIRTY, GFP_NOFS);
	set_extent_dirty(&info->free_space_cache, start, end, GFP_NOFS);

	btrfs_set_block_group_used(&cache->item, 0);

	return 0;
}

static int remove_chunk_extent_item(struct btrfs_trans_handle *trans,
				    struct recover_control *rc,
				    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	int ret = 0;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		if (!(chunk->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;
		ret = block_group_remove_all_extent_items(trans, root,
							   chunk->bg_rec);
		if (ret)
			return ret;

		ret = block_group_free_all_extent(trans, root, chunk->bg_rec);
		if (ret)
			return ret;
	}
	return ret;
}
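
/*
 * The on-disk chunk tree cannot be trusted, so the three helpers below
 * rebuild it from scratch: allocate a fresh empty root node, then
 * re-insert a DEV_ITEM for every known device and one CHUNK_ITEM per
 * recovered good chunk.
 */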
static int __rebuild_chunk_root(struct btrfs_trans_handle *trans,
				struct recover_control *rc,
				struct btrfs_root *root)
{
	u64 min_devid = -1;
	struct btrfs_device *dev;
	struct extent_buffer *cow;
	struct btrfs_disk_key disk_key;
	int ret = 0;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		if (min_devid > dev->devid)
			min_devid = dev->devid;
	}
	disk_key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	disk_key.type = BTRFS_DEV_ITEM_KEY;
	disk_key.offset = min_devid;

	cow = btrfs_alloc_free_block(trans, root, root->nodesize,
				     BTRFS_CHUNK_TREE_OBJECTID,
				     &disk_key, 0, 0, 0);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_nritems(cow, 0);
	btrfs_set_header_level(cow, 0);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(cow, BTRFS_CHUNK_TREE_OBJECTID);
	write_extent_buffer(cow, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(cow),
			    BTRFS_UUID_SIZE);

	root->node = cow;
	btrfs_mark_buffer_dirty(cow);

	return ret;
}
static int __rebuild_device_items(struct btrfs_trans_handle *trans,
				  struct recover_control *rc,
				  struct btrfs_root *root)
{
	struct btrfs_device *dev;
	struct btrfs_key key;
	struct btrfs_dev_item *dev_item;
	int ret = 0;

	dev_item = malloc(sizeof(struct btrfs_dev_item));
	if (!dev_item)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
		key.type = BTRFS_DEV_ITEM_KEY;
		key.offset = dev->devid;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		ret = btrfs_insert_item(trans, root, &key,
					dev_item, sizeof(*dev_item));
	}

	free(dev_item);
	return ret;
}

static int __rebuild_chunk_items(struct btrfs_trans_handle *trans,
				 struct recover_control *rc,
				 struct btrfs_root *root)
{
	struct btrfs_key key;
	struct btrfs_chunk *chunk = NULL;
	struct btrfs_root *chunk_root;
	struct chunk_record *chunk_rec;
	int ret;

	chunk_root = root->fs_info->chunk_root;

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		chunk = create_chunk_item(chunk_rec);
		if (!chunk)
			return -ENOMEM;

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(chunk->num_stripes));
		free(chunk);
		if (ret)
			return ret;
	}
	return 0;
}

static int rebuild_chunk_tree(struct btrfs_trans_handle *trans,
			      struct recover_control *rc,
			      struct btrfs_root *root)
{
	int ret = 0;

	root = root->fs_info->chunk_root;

	ret = __rebuild_chunk_root(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_device_items(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_chunk_items(trans, rc, root);

	return ret;
}
static int rebuild_sys_array(struct recover_control *rc,
			     struct btrfs_root *root)
{
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct chunk_record *chunk_rec;
	int ret = 0;
	u16 num_stripes;

	btrfs_set_super_sys_array_size(root->fs_info->super_copy, 0);

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		if (!(chunk_rec->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		num_stripes = chunk_rec->num_stripes;
		chunk = create_chunk_item(chunk_rec);
		if (!chunk) {
			ret = -ENOMEM;
			break;
		}

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_add_system_chunk(NULL, root, &key, chunk,
					     btrfs_chunk_item_size(num_stripes));
		free(chunk);
		if (ret)
			break;
	}
	return ret;
}
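
/*
 * Open the filesystem without reading the (broken) chunk tree from
 * disk: set up the chunk root by hand, feed the mapping tree from the
 * recovered good chunks, and only then run the normal root setup.
 */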
static struct btrfs_root *
open_ctree_with_broken_chunk(struct recover_control *rc)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_super_block *disk_super;
	struct extent_buffer *eb;
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 stripesize;
	int ret;

	fs_info = btrfs_new_fs_info(1, BTRFS_SUPER_INFO_OFFSET);
	if (!fs_info) {
		fprintf(stderr, "Failed to allocate memory for fs_info\n");
		return ERR_PTR(-ENOMEM);
	}
	fs_info->is_chunk_recover = 1;

	fs_info->fs_devices = rc->fs_devices;
	ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
	if (ret)
		goto out;

	disk_super = fs_info->super_copy;
	ret = btrfs_read_dev_super(fs_info->fs_devices->latest_bdev,
				   disk_super, fs_info->super_bytenr);
	if (ret) {
		fprintf(stderr, "No valid btrfs found\n");
		goto out_devices;
	}

	memcpy(fs_info->fsid, &disk_super->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_fs_compatibility(disk_super, 1);
	if (ret)
		goto out_devices;

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     fs_info->chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	ret = build_device_maps_by_chunk_records(rc, fs_info->chunk_root);
	if (ret)
		goto out_cleanup;

	ret = btrfs_setup_all_roots(fs_info, 0, 0);
	if (ret)
		goto out_failed;

	eb = fs_info->tree_root->node;
	read_extent_buffer(eb, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(eb),
			   BTRFS_UUID_SIZE);

	return fs_info->fs_root;
out_failed:
	btrfs_release_all_roots(fs_info);
out_cleanup:
	btrfs_cleanup_all_caches(fs_info);
out_devices:
	btrfs_close_devices(fs_info->fs_devices);
out:
	btrfs_free_fs_info(fs_info);
	return ERR_PTR(ret);
}
static int recover_prepare(struct recover_control *rc, char *path)
{
	int ret;
	int fd;
	struct btrfs_super_block *sb;
	struct btrfs_fs_devices *fs_devices;

	ret = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s error\n", path);
		return -1;
	}

	sb = malloc(sizeof(struct btrfs_super_block));
	if (!sb) {
		fprintf(stderr, "allocating memory for sb failed.\n");
		ret = -ENOMEM;
		goto fail_close_fd;
	}

	ret = btrfs_read_dev_super(fd, sb, BTRFS_SUPER_INFO_OFFSET);
	if (ret) {
		fprintf(stderr, "read super block error\n");
		goto fail_free_sb;
	}

	rc->sectorsize = btrfs_super_sectorsize(sb);
	rc->leafsize = btrfs_super_leafsize(sb);
	rc->generation = btrfs_super_generation(sb);
	rc->chunk_root_generation = btrfs_super_chunk_root_generation(sb);
	rc->csum_size = btrfs_super_csum_size(sb);

	/* If this is a seed device, the result of the scan below will be partial. */
	if (btrfs_super_flags(sb) & BTRFS_SUPER_FLAG_SEEDING) {
		fprintf(stderr, "this device is a seed device\n");
		ret = -1;
		goto fail_free_sb;
	}

	ret = btrfs_scan_fs_devices(fd, path, &fs_devices, 0, 1);
	if (ret)
		goto fail_free_sb;

	rc->fs_devices = fs_devices;

	if (rc->verbose)
		print_all_devices(&rc->fs_devices->devices);

fail_free_sb:
	free(sb);
fail_close_fd:
	close(fd);
	return ret;
}
static int btrfs_get_device_extents(u64 chunk_object,
				    struct list_head *orphan_devexts,
				    struct list_head *ret_list)
{
	struct device_extent_record *devext;
	struct device_extent_record *next;
	int count = 0;

	list_for_each_entry_safe(devext, next, orphan_devexts, chunk_list) {
		if (devext->chunk_offset == chunk_object) {
			list_move_tail(&devext->chunk_list, ret_list);
			count++;
		}
	}
	return count;
}

static int calc_num_stripes(u64 type)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		return 0;
	else if (type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 2;
	else
		return 1;
}

static inline int calc_sub_nstripes(u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID10)
		return 2;
	else
		return 1;
}
static int btrfs_verify_device_extents(struct block_group_record *bg,
				       struct list_head *devexts, int ndevexts)
{
	struct device_extent_record *devext;
	u64 stripe_length;
	int expected_num_stripes;

	expected_num_stripes = calc_num_stripes(bg->flags);
	if (expected_num_stripes && expected_num_stripes != ndevexts)
		return 1;

	stripe_length = calc_stripe_length(bg->flags, bg->offset, ndevexts);
	list_for_each_entry(devext, devexts, chunk_list) {
		if (devext->length != stripe_length)
			return 1;
	}
	return 0;
}
static int btrfs_rebuild_unordered_chunk_stripes(struct recover_control *rc,
						 struct chunk_record *chunk)
{
	struct device_extent_record *devext;
	struct btrfs_device *device;
	int i;

	devext = list_first_entry(&chunk->dextents, struct device_extent_record,
				  chunk_list);
	for (i = 0; i < chunk->num_stripes; i++) {
		chunk->stripes[i].devid = devext->objectid;
		chunk->stripes[i].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device)
			return -ENOENT;
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[i].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
		devext = list_next_entry(devext, chunk_list);
	}
	return 0;
}
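
/*
 * Map a logical address inside a chunk to the index of the stripe that
 * holds it.  E.g. for RAID0 with num_stripes = 3 and stripe_len = 64K,
 * a logical offset of 192K into the chunk gives stripe_nr = 3, so it
 * lands on stripe index 3 % 3 = 0 again.  For RAID5/6 the rotating
 * parity shifts the data stripe index by one per full stripe row.
 */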
static int btrfs_calc_stripe_index(struct chunk_record *chunk, u64 logical)
{
	u64 offset = logical - chunk->offset;
	int stripe_nr;
	int nr_data_stripes;
	int index;

	stripe_nr = offset / chunk->stripe_len;
	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID0) {
		index = stripe_nr % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID10) {
		index = stripe_nr % (chunk->num_stripes / chunk->sub_stripes);
		index *= chunk->sub_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5) {
		nr_data_stripes = chunk->num_stripes - 1;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6) {
		nr_data_stripes = chunk->num_stripes - 2;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else {
		BUG_ON(1);
	}
	return index;
}

/* calc the logical offset which is the start of the next stripe. */
static inline u64 btrfs_next_stripe_logical_offset(struct chunk_record *chunk,
						   u64 logical)
{
	u64 offset = logical - chunk->offset;

	offset /= chunk->stripe_len;
	offset *= chunk->stripe_len;
	offset += chunk->stripe_len;

	return offset + chunk->offset;
}

static int is_extent_record_in_device_extent(struct extent_record *er,
					     struct device_extent_record *dext,
					     int *mirror)
{
	int i;

	for (i = 0; i < er->nmirrors; i++) {
		if (er->devices[i]->devid == dext->objectid &&
		    er->offsets[i] >= dext->offset &&
		    er->offsets[i] < dext->offset + dext->length) {
			*mirror = i;
			return 1;
		}
	}
	return 0;
}
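
/*
 * For striped metadata chunks the stripe order matters.  Recover it by
 * walking the tree blocks found during the device scan: each block's
 * logical address fixes a stripe index, and the device extent that
 * physically contains one of its mirrors must be the stripe at that
 * index.  Stripes no tree block landed on can be filled in any order.
 */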
static int
btrfs_rebuild_ordered_meta_chunk_stripes(struct recover_control *rc,
					 struct chunk_record *chunk)
{
	u64 start = chunk->offset;
	u64 end = chunk->offset + chunk->length;
	struct cache_extent *cache;
	struct extent_record *er;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *device;
	LIST_HEAD(devexts);
	int index;
	int mirror;
	int ret;

	cache = lookup_cache_extent(&rc->eb_cache,
				    start, chunk->length);
	if (!cache) {
		/* No used space, we can reorder the stripes freely. */
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);
		return ret;
	}

	list_splice_init(&chunk->dextents, &devexts);
again:
	er = container_of(cache, struct extent_record, cache);
	index = btrfs_calc_stripe_index(chunk, er->cache.start);
	if (chunk->stripes[index].devid)
		goto next;
	list_for_each_entry_safe(devext, next, &devexts, chunk_list) {
		if (is_extent_record_in_device_extent(er, devext, &mirror)) {
			chunk->stripes[index].devid = devext->objectid;
			chunk->stripes[index].offset = devext->offset;
			memcpy(chunk->stripes[index].dev_uuid,
			       er->devices[mirror]->uuid,
			       BTRFS_UUID_SIZE);
			index++;
			list_move(&devext->chunk_list, &chunk->dextents);
		}
	}
next:
	start = btrfs_next_stripe_logical_offset(chunk, er->cache.start);
	if (start >= end)
		goto no_extent_record;

	cache = lookup_cache_extent(&rc->eb_cache, start, end - start);
	if (cache)
		goto again;
no_extent_record:
	if (list_empty(&devexts))
		return 0;

	if (chunk->type_flags & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
		/* Fixme: try to recover the order by the parity block. */
		list_splice_tail(&devexts, &chunk->dextents);
		return -EINVAL;
	}

	/* There is no data on the lost stripes, we can reorder them freely. */
	for (index = 0; index < chunk->num_stripes; index++) {
		if (chunk->stripes[index].devid)
			continue;

		devext = list_first_entry(&devexts,
					  struct device_extent_record,
					  chunk_list);
		list_move(&devext->chunk_list, &chunk->dextents);

		chunk->stripes[index].devid = devext->objectid;
		chunk->stripes[index].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device) {
			list_splice_tail(&devexts, &chunk->dextents);
			return -EINVAL;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[index].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
	}
	return 0;
}
#define BTRFS_ORDERED_RAID	(BTRFS_BLOCK_GROUP_RAID0 |	\
				 BTRFS_BLOCK_GROUP_RAID10 |	\
				 BTRFS_BLOCK_GROUP_RAID5 |	\
				 BTRFS_BLOCK_GROUP_RAID6)

static int btrfs_rebuild_chunk_stripes(struct recover_control *rc,
				       struct chunk_record *chunk)
{
	int ret;

	/*
	 * All the data in the system metadata chunk will be dropped,
	 * so we need not guarantee that the data is right or not, that
	 * is we can reorder the stripes in the system metadata chunk.
	 */
	if ((chunk->type_flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = btrfs_rebuild_ordered_meta_chunk_stripes(rc, chunk);
	else if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA) &&
		 (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = 1;	/* Be handled after the fs is opened. */
	else
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);

	return ret;
}
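
/*
 * Fetch the next checksum from the csum tree, advancing to the next
 * slot/leaf as needed.  Returns 0 with *tree_csum filled, 1 when the
 * csum items are exhausted, 2 when the current stripe's logical range
 * (bounded by @end) is passed, and -1 on error.
 */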
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
					       + (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			   (unsigned long)csum_item, csum_size);
	return ret;
}
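
/*
 * Translate a logical csum position into the physical offset it would
 * have on a candidate device extent, compensating for the parity
 * stripes of RAID5/6: a full row of nr_data_stripes logical stripes
 * advances each device by exactly one stripe_len.
 */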
static u64 calc_data_offset(struct btrfs_key *key,
			    struct chunk_record *chunk,
			    u64 dev_offset,
			    u64 csum_offset,
			    u32 blocksize)
{
	u64 data_offset;
	int logical_stripe_nr;
	int dev_stripe_nr;
	int nr_data_stripes;

	data_offset = key->offset + csum_offset * blocksize - chunk->offset;
	nr_data_stripes = chunk->num_stripes;

	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
		nr_data_stripes -= 1;
	else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
		nr_data_stripes -= 2;

	logical_stripe_nr = data_offset / chunk->stripe_len;
	dev_stripe_nr = logical_stripe_nr / nr_data_stripes;

	data_offset -= logical_stripe_nr * chunk->stripe_len;
	data_offset += dev_stripe_nr * chunk->stripe_len;

	return dev_offset + data_offset;
}

static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	char *data;
	int ret = 0;
	u32 csum_result = ~(u32)0;

	data = malloc(len);
	if (!data)
		return -1;
	ret = pread64(fd, data, len, start);
	if (ret < 0 || ret != len) {
		ret = -1;
		goto out;
	}
	ret = 0;
	csum_result = btrfs_csum_data(NULL, data, csum_result, len);
	btrfs_csum_final(csum_result, (char *)&csum_result);
	if (csum_result != tree_csum)
		ret = 1;
out:
	free(data);
	return ret;
}

static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
			   struct extent_buffer *leaf, int slot)
{
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	u64 offset = btrfs_item_size_nr(leaf, slot);
	offset /= csum_size;
	offset *= blocksize;
	offset += key->offset;

	return offset;
}
static int insert_stripe(struct list_head *devexts,
			 struct recover_control *rc,
			 struct chunk_record *chunk,
			 int index)
{
	struct device_extent_record *devext;
	struct btrfs_device *dev;

	devext = list_entry(devexts->next, struct device_extent_record,
			    chunk_list);
	dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					 0);
	if (!dev)
		return 1;
	BUG_ON(btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					  1));

	chunk->stripes[index].devid = devext->objectid;
	chunk->stripes[index].offset = devext->offset;
	memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);

	list_move(&devext->chunk_list, &chunk->dextents);

	return 0;
}
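
/*
 * Recover the stripe order of a RAID5/6 data chunk by elimination: for
 * every checksummed block, read the block back from each remaining
 * candidate device extent and drop the candidates whose data does not
 * match the csum tree.  When exactly one candidate survives a stripe,
 * it must be that stripe.  EQUAL_STRIPE is reported when the csums run
 * out while several byte-identical candidates remain undecided.
 */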
#define EQUAL_STRIPE	(1 << 0)

static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;
	u64 start = chunk->offset;
	u64 end = start + chunk->stripe_len;
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;
	u64 data_offset;
	u32 blocksize = root->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);
	LIST_HEAD(candidates);

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
						> start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto next_stripe;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			if (key.offset >= end)
				goto next_stripe;
			else
				goto next_csum;
		}
		leaf = path.nodes[0];
		slot = path.slots[0];

		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		} else {
			if (key.offset >= end)
				goto next_stripe;
		}

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto out;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		list_for_each_entry(devext, &unordered, chunk_list)
			num_unordered++;
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
		    && num_unordered == 2) {
			list_splice_init(&unordered, &chunk->dextents);
			btrfs_release_path(&path);
			return 0;
		} else
			ret = 1;

		goto fail_out;
	}

	if (list_is_last(candidates.next, &candidates)) {
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	ret = 0;
	list_splice_init(&candidates, &unordered);
	num_unordered = 0;
	list_for_each_entry(devext, &unordered, chunk_list)
		num_unordered++;
	if (num_unordered == 1) {
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		if ((num_unordered == 2 && chunk->type_flags
					   & BTRFS_BLOCK_GROUP_RAID5)
		    || (num_unordered == 3 && chunk->type_flags
					      & BTRFS_BLOCK_GROUP_RAID6)) {
			for (i = 0; i < chunk->num_stripes; i++) {
				if (!chunk->stripes[i].devid) {
					ret = insert_stripe(&unordered, rc,
							    chunk, i);
					if (ret)
						break;
				}
			}
		}
	}
fail_out:
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
						    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	int ret = 0;
	int err;
	u8 flags;

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
		    && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
			flags = 0;
			err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
							      &flags);
			if (err) {
				list_move(&chunk->list, &rc->bad_chunks);
				if (flags & EQUAL_STRIPE)
					fprintf(stderr,
			"Failure: too many equal stripes in chunk[%llu %llu]\n",
						chunk->offset, chunk->length);
				if (!ret)
					ret = err;
			} else
				list_move(&chunk->list, &rc->good_chunks);
		}
	}
	return ret;
}
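
/*
 * Rebuild one chunk record per surviving block group: the geometry
 * comes from the block group item, the stripes from the device extents
 * that point back at it.  Chunks whose stripes cannot be verified or
 * ordered yet go to the bad or unrepaired lists for later passes.
 */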
static int btrfs_recover_chunks(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct block_group_record *next;
	LIST_HEAD(new_chunks);
	LIST_HEAD(devexts);
	int nstripes;
	int ret;

	/* create the chunk by block group */
	list_for_each_entry_safe(bg, next, &rc->bg.block_groups, list) {
		nstripes = btrfs_get_device_extents(bg->objectid,
						    &rc->devext.no_chunk_orphans,
						    &devexts);
		chunk = malloc(btrfs_chunk_record_size(nstripes));
		if (!chunk)
			return -ENOMEM;
		memset(chunk, 0, btrfs_chunk_record_size(nstripes));
		INIT_LIST_HEAD(&chunk->dextents);
		chunk->bg_rec = bg;
		chunk->cache.start = bg->objectid;
		chunk->cache.size = bg->offset;
		chunk->objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		chunk->type = BTRFS_CHUNK_ITEM_KEY;
		chunk->offset = bg->objectid;
		chunk->generation = bg->generation;
		chunk->length = bg->offset;
		chunk->owner = BTRFS_CHUNK_TREE_OBJECTID;
		chunk->stripe_len = BTRFS_STRIPE_LEN;
		chunk->type_flags = bg->flags;
		chunk->io_width = BTRFS_STRIPE_LEN;
		chunk->io_align = BTRFS_STRIPE_LEN;
		chunk->sector_size = rc->sectorsize;
		chunk->sub_stripes = calc_sub_nstripes(bg->flags);

		ret = insert_cache_extent(&rc->chunk, &chunk->cache);
		BUG_ON(ret);

		if (!nstripes) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		ret = btrfs_verify_device_extents(bg, &devexts, nstripes);
		list_splice_init(&devexts, &chunk->dextents);
		if (ret) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		chunk->num_stripes = nstripes;
		ret = btrfs_rebuild_chunk_stripes(rc, chunk);
		if (ret > 0)
			list_add_tail(&chunk->list, &rc->unrepaired_chunks);
		else if (ret < 0)
			list_add_tail(&chunk->list, &rc->bad_chunks);
		else
			list_add_tail(&chunk->list, &rc->good_chunks);
	}
	/*
	 * Don't worry about the lost orphan device extents; they have no
	 * chunk and block group, so they must be old ones that we have
	 * dropped.
	 */
	return 0;
}
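
/*
 * Entry point of the chunk recovery command: scan all devices for
 * metadata, rebuild the chunk/block-group/device-extent records, verify
 * them against whatever trees are still readable, and finally rewrite
 * the chunk tree, device items and the superblock's sys chunk array.
 */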
/*
 * Return 0 when successful, < 0 on error and > 0 if aborted by user
 */
int btrfs_recover_chunk_tree(char *path, int verbose, int yes)
{
	int ret = 0;
	struct btrfs_root *root = NULL;
	struct btrfs_trans_handle *trans;
	struct recover_control rc;

	init_recover_control(&rc, verbose, yes);

	ret = recover_prepare(&rc, path);
	if (ret) {
		fprintf(stderr, "recover prepare error\n");
		return ret;
	}

	ret = scan_devices(&rc);
	if (ret) {
		fprintf(stderr, "scan chunk headers error\n");
		goto fail_rc;
	}

	if (cache_tree_empty(&rc.chunk) &&
	    cache_tree_empty(&rc.bg.tree) &&
	    cache_tree_empty(&rc.devext.tree)) {
		fprintf(stderr, "no recoverable chunk\n");
		goto fail_rc;
	}

	print_scan_result(&rc);

	ret = check_chunks(&rc.chunk, &rc.bg, &rc.devext, &rc.good_chunks,
			   &rc.bad_chunks, 1);
	print_check_result(&rc);
	if (ret) {
		if (!list_empty(&rc.bg.block_groups) ||
		    !list_empty(&rc.devext.no_chunk_orphans)) {
			ret = btrfs_recover_chunks(&rc);
			if (ret)
				goto fail_rc;
		}
		/*
		 * If a chunk is healthy, its block group item and device
		 * extent items should be written on the disks. So, it is
		 * very likely that a bad chunk is an old one that has been
		 * dropped from the fs. Don't deal with them now; we will
		 * check them after the fs is opened.
		 */
	} else {
		fprintf(stderr, "Check chunks successfully with no orphans\n");
		goto fail_rc;
	}

	root = open_ctree_with_broken_chunk(&rc);
	if (IS_ERR(root)) {
		fprintf(stderr, "open with broken chunk error\n");
		ret = PTR_ERR(root);
		goto fail_rc;
	}

	ret = check_all_chunks_by_metadata(&rc, root);
	if (ret) {
		fprintf(stderr, "The chunks in memory do not match the metadata of the fs. Repair failed.\n");
		goto fail_close_ctree;
	}

	ret = btrfs_rebuild_ordered_data_chunk_stripes(&rc, root);
	if (ret) {
		fprintf(stderr, "Failed to rebuild ordered chunk stripes.\n");
		goto fail_close_ctree;
	}

	if (!rc.yes) {
		ret = ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
		if (!ret) {
			ret = 1;
			goto fail_close_ctree;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	ret = remove_chunk_extent_item(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_chunk_tree(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_sys_array(&rc, root);
	BUG_ON(ret);

	btrfs_commit_transaction(trans, root);
fail_close_ctree:
	close_ctree(root);
fail_rc:
	free_recover_control(&rc);
	return ret;
}