/*
 * Copyright (C) 2013 FUJITSU LIMITED.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#define _XOPEN_SOURCE 500

#include <stdio_ext.h>
#include <sys/types.h>
#include <uuid/uuid.h>

#include "kerncompat.h"
#include "radix-tree.h"
#include "extent-cache.h"
#include "transaction.h"
struct recover_control {
	int verbose;
	int yes;

	u16 csum_size;
	u32 sectorsize;
	u32 leafsize;
	u64 generation;
	u64 chunk_root_generation;

	struct btrfs_fs_devices *fs_devices;

	struct cache_tree chunk;
	struct block_group_tree bg;
	struct device_extent_tree devext;
	struct cache_tree eb_cache;

	struct list_head good_chunks;
	struct list_head bad_chunks;
	struct list_head unrepaired_chunks;
	pthread_mutex_t rc_lock;
};
struct extent_record {
	struct cache_extent cache;
	u64 generation;
	u8 csum[BTRFS_CSUM_SIZE];
	struct btrfs_device *devices[BTRFS_MAX_MIRRORS];
	u64 offsets[BTRFS_MAX_MIRRORS];
	int nmirrors;
};

struct device_scan {
	struct recover_control *rc;
	struct btrfs_device *dev;
	int fd;
};
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
{
	struct extent_record *rec;

	rec = malloc(sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Fail to allocate memory for extent record.\n");
		exit(1);
	}

	memset(rec, 0, sizeof(*rec));
	rec->cache.start = btrfs_header_bytenr(eb);
	rec->cache.size = eb->len;
	rec->generation = btrfs_header_generation(eb);
	read_extent_buffer(eb, rec->csum, (unsigned long)btrfs_header_csum(eb),
			   BTRFS_CSUM_SIZE);
	return rec;
}
static int process_extent_buffer(struct cache_tree *eb_cache,
				 struct extent_buffer *eb,
				 struct btrfs_device *device, u64 offset)
{
	struct extent_record *rec;
	struct extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_extent_record(eb);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(eb_cache,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct extent_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			if (exist->cache.start != rec->cache.start ||
			    exist->cache.size != rec->cache.size ||
			    memcmp(exist->csum, rec->csum, BTRFS_CSUM_SIZE)) {
				ret = -EEXIST;
			} else {
				BUG_ON(exist->nmirrors >= BTRFS_MAX_MIRRORS);
				exist->devices[exist->nmirrors] = device;
				exist->offsets[exist->nmirrors] = offset;
				exist->nmirrors++;
			}
			goto free_out;
		}
		remove_cache_extent(eb_cache, cache);
		free(exist);
		goto again;
	}

	rec->devices[0] = device;
	rec->offsets[0] = offset;
	rec->nmirrors++;
	ret = insert_cache_extent(eb_cache, &rec->cache);
	BUG_ON(ret);

	return ret;
free_out:
	free(rec);
	return ret;
}
static void free_extent_record(struct cache_extent *cache)
{
	struct extent_record *er;

	er = container_of(cache, struct extent_record, cache);
	free(er);
}

FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record);
static struct btrfs_chunk *create_chunk_item(struct chunk_record *record)
{
	struct btrfs_chunk *ret;
	struct btrfs_stripe *chunk_stripe;
	int i;

	if (!record || record->num_stripes == 0)
		return NULL;
	ret = malloc(btrfs_chunk_item_size(record->num_stripes));
	if (!ret)
		return NULL;

	btrfs_set_stack_chunk_length(ret, record->length);
	btrfs_set_stack_chunk_owner(ret, record->owner);
	btrfs_set_stack_chunk_stripe_len(ret, record->stripe_len);
	btrfs_set_stack_chunk_type(ret, record->type_flags);
	btrfs_set_stack_chunk_io_align(ret, record->io_align);
	btrfs_set_stack_chunk_io_width(ret, record->io_width);
	btrfs_set_stack_chunk_sector_size(ret, record->sector_size);
	btrfs_set_stack_chunk_num_stripes(ret, record->num_stripes);
	btrfs_set_stack_chunk_sub_stripes(ret, record->sub_stripes);
	for (i = 0, chunk_stripe = &ret->stripe; i < record->num_stripes;
	     i++, chunk_stripe++) {
		btrfs_set_stack_stripe_devid(chunk_stripe,
					     record->stripes[i].devid);
		btrfs_set_stack_stripe_offset(chunk_stripe,
					      record->stripes[i].offset);
		memcpy(chunk_stripe->dev_uuid, record->stripes[i].dev_uuid,
		       BTRFS_UUID_SIZE);
	}
	return ret;
}
static void init_recover_control(struct recover_control *rc, int verbose,
				 int yes)
{
	memset(rc, 0, sizeof(struct recover_control));

	cache_tree_init(&rc->chunk);
	cache_tree_init(&rc->eb_cache);
	block_group_tree_init(&rc->bg);
	device_extent_tree_init(&rc->devext);

	INIT_LIST_HEAD(&rc->good_chunks);
	INIT_LIST_HEAD(&rc->bad_chunks);
	INIT_LIST_HEAD(&rc->unrepaired_chunks);

	rc->verbose = verbose;
	rc->yes = yes;
	pthread_mutex_init(&rc->rc_lock, NULL);
}

static void free_recover_control(struct recover_control *rc)
{
	free_block_group_tree(&rc->bg);
	free_chunk_cache_tree(&rc->chunk);
	free_device_extent_tree(&rc->devext);
	free_extent_record_tree(&rc->eb_cache);
	pthread_mutex_destroy(&rc->rc_lock);
}
static int process_block_group_item(struct block_group_tree *bg_cache,
				    struct extent_buffer *leaf,
				    struct btrfs_key *key, int slot)
{
	struct block_group_record *rec;
	struct block_group_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_block_group_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(&bg_cache->tree,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct block_group_record, cache);

		/* Check the generation and replace if needed. */
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct block_group_record,
					      generation);
			/*
			 * According to the current kernel code, the following
			 * case is impossible, or there is something wrong in
			 * the kernel code.
			 */
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&bg_cache->tree, cache);
		list_del_init(&exist->list);
		free(exist);
		/*
		 * We must do the search again to avoid the following cache:
		 *	/--old bg 1--//--old bg 2--/
		 *	       /--new bg--/
		 */
		goto again;
	}

	ret = insert_block_group_record(bg_cache, rec);
	BUG_ON(ret);
	return ret;
free_out:
	free(rec);
	return ret;
}
static int process_chunk_item(struct cache_tree *chunk_cache,
			      struct extent_buffer *leaf, struct btrfs_key *key,
			      int slot)
{
	struct chunk_record *rec;
	struct chunk_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_chunk_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(chunk_cache, rec->offset, rec->length);
	if (cache) {
		exist = container_of(cache, struct chunk_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int num_stripes = rec->num_stripes;
			int rec_size = btrfs_chunk_record_size(num_stripes);
			int offset = offsetof(struct chunk_record, generation);

			if (exist->num_stripes != rec->num_stripes ||
			    memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   rec_size - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(chunk_cache, cache);
		free(exist);
		goto again;
	}

	ret = insert_cache_extent(chunk_cache, &rec->cache);
	BUG_ON(ret);
	return ret;
free_out:
	free(rec);
	return ret;
}
static int process_device_extent_item(struct device_extent_tree *devext_cache,
				      struct extent_buffer *leaf,
				      struct btrfs_key *key, int slot)
{
	struct device_extent_record *rec;
	struct device_extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_device_extent_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent2(&devext_cache->tree,
				     rec->cache.objectid,
				     rec->cache.start,
				     rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct device_extent_record, cache);
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct device_extent_record,
					      generation);
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&devext_cache->tree, cache);
		list_del_init(&exist->chunk_list);
		list_del_init(&exist->device_list);
		free(exist);
		goto again;
	}

	ret = insert_device_extent_record(devext_cache, rec);
	BUG_ON(ret);
	return ret;
free_out:
	free(rec);
	return ret;
}
static void print_block_group_info(struct block_group_record *rec, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Block Group: start = %llu, len = %llu, flag = %llx\n",
	       rec->objectid, rec->offset, rec->flags);
}

static void print_block_group_tree(struct block_group_tree *tree)
{
	struct cache_extent *cache;
	struct block_group_record *rec;

	printf("All Block Groups:\n");
	for (cache = first_cache_extent(&tree->tree); cache;
	     cache = next_cache_extent(cache)) {
		rec = container_of(cache, struct block_group_record, cache);
		print_block_group_info(rec, "\t");
	}
}

static void print_stripe_info(struct stripe *data, char *prefix1, char *prefix2,
			      int index)
{
	if (prefix1)
		printf("%s", prefix1);
	if (prefix2)
		printf("%s", prefix2);
	printf("[%2d] Stripe: devid = %llu, offset = %llu\n",
	       index, data->devid, data->offset);
}

static void print_chunk_self_info(struct chunk_record *rec, char *prefix)
{
	int i;

	if (prefix)
		printf("%s", prefix);
	printf("Chunk: start = %llu, len = %llu, type = %llx, num_stripes = %u\n",
	       rec->offset, rec->length, rec->type_flags, rec->num_stripes);
	if (prefix)
		printf("%s", prefix);
	printf("    Stripes list:\n");
	for (i = 0; i < rec->num_stripes; i++)
		print_stripe_info(&rec->stripes[i], prefix, "    ", i);
}
static void print_chunk_tree(struct cache_tree *tree)
{
	struct cache_extent *n;
	struct chunk_record *entry;

	printf("All Chunks:\n");
	for (n = first_cache_extent(tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct chunk_record, cache);
		print_chunk_self_info(entry, "\t");
	}
}

static void print_device_extent_info(struct device_extent_record *rec,
				     char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device extent: devid = %llu, start = %llu, len = %llu, chunk offset = %llu\n",
	       rec->objectid, rec->offset, rec->length, rec->chunk_offset);
}

static void print_device_extent_tree(struct device_extent_tree *tree)
{
	struct cache_extent *n;
	struct device_extent_record *entry;

	printf("All Device Extents:\n");
	for (n = first_cache_extent(&tree->tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct device_extent_record, cache);
		print_device_extent_info(entry, "\t");
	}
}

static void print_device_info(struct btrfs_device *device, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device: id = %llu, name = %s\n",
	       device->devid, device->name);
}

static void print_all_devices(struct list_head *devices)
{
	struct btrfs_device *dev;

	printf("All Devices:\n");
	list_for_each_entry(dev, devices, dev_list)
		print_device_info(dev, "\t");
}
static void print_scan_result(struct recover_control *rc)
{
	if (!rc->verbose)
		return;

	printf("DEVICE SCAN RESULT:\n");
	printf("Filesystem Information:\n");
	printf("\tsectorsize: %d\n", rc->sectorsize);
	printf("\tleafsize: %d\n", rc->leafsize);
	printf("\ttree root generation: %llu\n", rc->generation);
	printf("\tchunk root generation: %llu\n", rc->chunk_root_generation);

	print_all_devices(&rc->fs_devices->devices);
	print_block_group_tree(&rc->bg);
	print_chunk_tree(&rc->chunk);
	print_device_extent_tree(&rc->devext);
}
static void print_chunk_info(struct chunk_record *chunk, char *prefix)
{
	struct device_extent_record *devext;
	int i;

	print_chunk_self_info(chunk, prefix);
	if (prefix)
		printf("%s", prefix);
	if (chunk->bg_rec)
		print_block_group_info(chunk->bg_rec, "    ");
	else
		printf("    No block group.\n");
	if (prefix)
		printf("%s", prefix);
	if (list_empty(&chunk->dextents)) {
		printf("    No device extent.\n");
	} else {
		printf("    Device extent list:\n");
		i = 0;
		list_for_each_entry(devext, &chunk->dextents, chunk_list) {
			if (prefix)
				printf("%s", prefix);
			printf("%s[%2d]", "        ", i);
			print_device_extent_info(devext, NULL);
			i++;
		}
	}
}
static void print_check_result(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct device_extent_record *devext;
	int total = 0;
	int good = 0;
	int bad = 0;

	if (!rc->verbose)
		return;

	printf("CHECK RESULT:\n");
	printf("Healthy Chunks:\n");
	list_for_each_entry(chunk, &rc->good_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	printf("Bad Chunks:\n");
	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		print_chunk_info(chunk, "  ");
		bad++;
		total++;
	}
	printf("Total Chunks:\t%d\n", total);
	printf("  Healthy:\t%d\n", good);
	printf("  Bad:\t%d\n", bad);

	printf("Orphan Block Groups:\n");
	list_for_each_entry(bg, &rc->bg.block_groups, list)
		print_block_group_info(bg, "  ");

	printf("Orphan Device Extents:\n");
	list_for_each_entry(devext, &rc->devext.no_chunk_orphans, chunk_list)
		print_device_extent_info(devext, "  ");
}
static int check_chunk_by_metadata(struct recover_control *rc,
				   struct btrfs_root *root,
				   struct chunk_record *chunk, int bg_only)
{
	int ret;
	int i;
	int slot;
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *dev_root;
	struct stripe *stripe;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_block_group_item *bg_ptr;
	struct extent_buffer *l;

	btrfs_init_path(&path);

	if (bg_only)
		goto bg_check;

	dev_root = root->fs_info->dev_root;
	for (i = 0; i < chunk->num_stripes; i++) {
		stripe = &chunk->stripes[i];

		key.objectid = stripe->devid;
		key.offset = stripe->offset;
		key.type = BTRFS_DEV_EXTENT_KEY;

		ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
		if (ret < 0) {
			fprintf(stderr, "Search device extent failed(%d)\n",
				ret);
			btrfs_release_path(&path);
			return ret;
		} else if (ret > 0) {
			if (rc->verbose)
				fprintf(stderr,
					"No device extent[%llu, %llu]\n",
					stripe->devid, stripe->offset);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		l = path.nodes[0];
		slot = path.slots[0];
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		if (chunk->offset !=
		    btrfs_dev_extent_chunk_offset(l, dev_extent)) {
			if (rc->verbose)
				fprintf(stderr,
					"Device tree mismatch with chunks dev_extent[%llu, %llu], chunk[%llu, %llu]\n",
					btrfs_dev_extent_chunk_offset(l,
								dev_extent),
					btrfs_dev_extent_length(l, dev_extent),
					chunk->offset, chunk->length);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		btrfs_release_path(&path);
	}

bg_check:
	key.objectid = chunk->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk->length;

	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
				0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search block group failed(%d)\n", ret);
		btrfs_release_path(&path);
		return ret;
	} else if (ret > 0) {
		if (rc->verbose)
			fprintf(stderr, "No block group[%llu, %llu]\n",
				key.objectid, key.offset);
		btrfs_release_path(&path);
		return -ENOENT;
	}

	l = path.nodes[0];
	slot = path.slots[0];
	bg_ptr = btrfs_item_ptr(l, slot, struct btrfs_block_group_item);
	if (chunk->type_flags != btrfs_disk_block_group_flags(l, bg_ptr)) {
		if (rc->verbose)
			fprintf(stderr,
				"Chunk[%llu, %llu]'s type(%llu) is different from the Block Group's type(%llu)\n",
				chunk->offset, chunk->length, chunk->type_flags,
				btrfs_disk_block_group_flags(l, bg_ptr));
		btrfs_release_path(&path);
		return -ENOENT;
	}
	btrfs_release_path(&path);
	return 0;
}
static int check_all_chunks_by_metadata(struct recover_control *rc,
					struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	LIST_HEAD(orphan_chunks);
	int ret = 0;
	int err;

	list_for_each_entry_safe(chunk, next, &rc->good_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 0);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err != -ENOENT && !ret)
			ret = err ? err : -EINVAL;
	}
	list_splice(&orphan_chunks, &rc->bad_chunks);
	return ret;
}
static int extract_metadata_record(struct recover_control *rc,
				   struct extent_buffer *leaf)
{
	struct btrfs_key key;
	int ret = 0;
	int i;
	u32 nritems;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_block_group_item(&rc->bg, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_CHUNK_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_chunk_item(&rc->chunk, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_DEV_EXTENT_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_device_extent_item(&rc->devext, leaf,
							 &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		}
		if (ret)
			break;
	}
	return ret;
}
static inline int is_super_block_address(u64 offset)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		if (offset == btrfs_sb_offset(i))
			return 1;
	}
	return 0;
}
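/*
 * Illustrative sketch, not part of the original file: how the helper above is
 * typically used.  Assuming the usual BTRFS_SUPER_MIRROR_MAX of 3,
 * btrfs_sb_offset() yields the superblock copies at 64KiB, 64MiB and 256GiB,
 * so a raw device scan can step over them instead of parsing them as leaves.
 */
#if 0
static u64 example_skip_super_blocks(u64 bytenr, u32 sectorsize)
{
	/* Advance past any superblock mirror that starts at this offset. */
	while (is_super_block_address(bytenr))
		bytenr += sectorsize;
	return bytenr;
}
#endif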
static int scan_one_device(void *dev_scan_struct)
{
	struct extent_buffer *buf;
	u64 bytenr;
	int ret = 0;
	struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
	struct recover_control *rc = dev_scan->rc;
	struct btrfs_device *device = dev_scan->dev;
	int fd = dev_scan->fd;
	int oldtype;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
	if (ret)
		return 1;

	buf = malloc(sizeof(*buf) + rc->leafsize);
	if (!buf)
		return -ENOMEM;
	buf->len = rc->leafsize;

	bytenr = 0;
	while (1) {
		if (is_super_block_address(bytenr))
			bytenr += rc->sectorsize;

		if (pread64(fd, buf->data, rc->leafsize, bytenr) <
		    rc->leafsize)
			break;

		if (memcmp_extent_buffer(buf, rc->fs_devices->fsid,
					 btrfs_header_fsid(),
					 BTRFS_FSID_SIZE)) {
			bytenr += rc->sectorsize;
			continue;
		}

		if (verify_tree_block_csum_silent(buf, rc->csum_size)) {
			bytenr += rc->sectorsize;
			continue;
		}

		pthread_mutex_lock(&rc->rc_lock);
		ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
		pthread_mutex_unlock(&rc->rc_lock);
		if (ret)
			goto out;

		if (btrfs_header_level(buf) != 0)
			goto next_node;

		switch (btrfs_header_owner(buf)) {
		case BTRFS_EXTENT_TREE_OBJECTID:
		case BTRFS_DEV_TREE_OBJECTID:
			/* different trees use different generations */
			if (btrfs_header_generation(buf) > rc->generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		case BTRFS_CHUNK_TREE_OBJECTID:
			if (btrfs_header_generation(buf) >
			    rc->chunk_root_generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		}
next_node:
		bytenr += rc->leafsize;
	}
out:
	close(fd);
	free(buf);
	return ret;
}
static int scan_devices(struct recover_control *rc)
{
	int ret = 0;
	int fd;
	int i;
	struct btrfs_device *dev;
	struct device_scan *dev_scans;
	pthread_t *t_scans;
	int *t_rets;
	int devnr = 0;
	int devidx = 0;
	int cancel_from = 0;
	int cancel_to = -1;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
		devnr++;
	dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
						 * devnr);
	if (!dev_scans)
		return -ENOMEM;
	t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
	if (!t_scans)
		return -ENOMEM;
	t_rets = (int *)malloc(sizeof(int) * devnr);
	if (!t_rets)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		fd = open(dev->name, O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "Failed to open device %s\n",
				dev->name);
			ret = 1;
			goto out;
		}
		dev_scans[devidx].rc = rc;
		dev_scans[devidx].dev = dev;
		dev_scans[devidx].fd = fd;
		ret = pthread_create(&t_scans[devidx], NULL,
				     (void *)scan_one_device,
				     (void *)&dev_scans[devidx]);
		if (ret) {
			cancel_to = devidx - 1;
			goto out;
		}
		devidx++;
	}

	for (i = 0; i < devnr; i++) {
		ret = pthread_join(t_scans[i], (void **)&t_rets[i]);
		if (ret || t_rets[i]) {
			ret = 1;
			cancel_from = i + 1;
			cancel_to = devnr - 1;
			break;
		}
	}
out:
	while (ret && (cancel_from <= cancel_to)) {
		pthread_cancel(t_scans[cancel_from]);
		cancel_from++;
	}
	free(dev_scans);
	free(t_scans);
	free(t_rets);
	return ret;
}
static int build_device_map_by_chunk_record(struct btrfs_root *root,
					    struct chunk_record *chunk)
{
	int ret = 0;
	int i;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u16 num_stripes;
	struct btrfs_mapping_tree *map_tree;
	struct map_lookup *map;
	struct stripe *stripe;

	map_tree = &root->fs_info->mapping_tree;
	num_stripes = chunk->num_stripes;
	map = malloc(btrfs_map_lookup_size(num_stripes));
	if (!map)
		return -ENOMEM;
	map->ce.start = chunk->offset;
	map->ce.size = chunk->length;
	map->num_stripes = num_stripes;
	map->io_width = chunk->io_width;
	map->io_align = chunk->io_align;
	map->sector_size = chunk->sector_size;
	map->stripe_len = chunk->stripe_len;
	map->type = chunk->type_flags;
	map->sub_stripes = chunk->sub_stripes;

	for (i = 0, stripe = chunk->stripes; i < num_stripes; i++, stripe++) {
		devid = stripe->devid;
		memcpy(uuid, stripe->dev_uuid, BTRFS_UUID_SIZE);
		map->stripes[i].physical = stripe->offset;
		map->stripes[i].dev = btrfs_find_device(root, devid,
							uuid, NULL);
		if (!map->stripes[i].dev) {
			free(map);
			return -EIO;
		}
	}

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	return ret;
}

static int build_device_maps_by_chunk_records(struct recover_control *rc,
					      struct btrfs_root *root)
{
	int ret = 0;
	struct chunk_record *chunk;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	return ret;
}
static int block_group_remove_all_extent_items(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct block_group_record *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	u64 start = bg->objectid;
	u64 end = bg->objectid + bg->offset;
	u64 old_val;
	int nitems;
	int ret;
	int i;
	int del_s, del_nr;

	btrfs_init_path(&path);
	root = root->fs_info->extent_root;

	key.objectid = start;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
	if (ret < 0)
		goto err;
	else if (ret > 0)
		ret = 0;

	leaf = path.nodes[0];
	nitems = btrfs_header_nritems(leaf);
	if (!nitems) {
		/* The tree is empty. */
		ret = 0;
		goto err;
	}

	if (path.slots[0] >= nitems) {
		ret = btrfs_next_leaf(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0) {
			ret = 0;
			goto err;
		}
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, 0);
		if (key.objectid >= end)
			goto err;
		btrfs_release_path(&path);
		goto again;
	}

	del_nr = 0;
	del_s = -1;
	for (i = path.slots[0]; i < nitems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid >= end)
			break;

		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (del_nr == 0)
				continue;
			else
				break;
		}

		if (del_s == -1)
			del_s = i;
		del_nr++;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			old_val = btrfs_super_bytes_used(fs_info->super_copy);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				old_val += root->leafsize;
			else
				old_val += key.offset;
			btrfs_set_super_bytes_used(fs_info->super_copy,
						   old_val);
		}
	}

	if (del_nr) {
		ret = btrfs_del_items(trans, root, &path, del_s, del_nr);
		if (ret)
			goto err;
	}

	if (key.objectid < end) {
		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			key.objectid += root->sectorsize;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
		}
		btrfs_release_path(&path);
		goto again;
	}
err:
	btrfs_release_path(&path);
	return ret;
}
static int block_group_free_all_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct block_group_record *bg)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info;
	u64 start;
	u64 end;

	info = root->fs_info;
	cache = btrfs_lookup_block_group(info, bg->objectid);
	if (!cache)
		return -ENOENT;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	set_extent_bits(&info->block_group_cache, start, end,
			BLOCK_GROUP_DIRTY, GFP_NOFS);
	set_extent_dirty(&info->free_space_cache, start, end, GFP_NOFS);

	btrfs_set_block_group_used(&cache->item, 0);

	return 0;
}
static int remove_chunk_extent_item(struct btrfs_trans_handle *trans,
				    struct recover_control *rc,
				    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	int ret = 0;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		if (!(chunk->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		ret = block_group_remove_all_extent_items(trans, root,
							  chunk->bg_rec);
		if (ret)
			return ret;

		ret = block_group_free_all_extent(trans, root, chunk->bg_rec);
		if (ret)
			return ret;
	}
	return ret;
}
static int __rebuild_chunk_root(struct btrfs_trans_handle *trans,
				struct recover_control *rc,
				struct btrfs_root *root)
{
	u64 min_devid = -1;
	struct btrfs_device *dev;
	struct extent_buffer *cow;
	struct btrfs_disk_key disk_key;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		if (min_devid > dev->devid)
			min_devid = dev->devid;
	}
	disk_key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	disk_key.type = BTRFS_DEV_ITEM_KEY;
	disk_key.offset = min_devid;

	cow = btrfs_alloc_free_block(trans, root, root->nodesize,
				     BTRFS_CHUNK_TREE_OBJECTID,
				     &disk_key, 0, 0, 0);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_nritems(cow, 0);
	btrfs_set_header_level(cow, 0);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(cow, BTRFS_CHUNK_TREE_OBJECTID);

	write_extent_buffer(cow, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(cow),
			    BTRFS_UUID_SIZE);

	root->node = cow;
	btrfs_mark_buffer_dirty(cow);

	return 0;
}
static int __rebuild_device_items(struct btrfs_trans_handle *trans,
				  struct recover_control *rc,
				  struct btrfs_root *root)
{
	struct btrfs_device *dev;
	struct btrfs_key key;
	struct btrfs_dev_item *dev_item;
	int ret = 0;

	dev_item = malloc(sizeof(struct btrfs_dev_item));
	if (!dev_item)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
		key.type = BTRFS_DEV_ITEM_KEY;
		key.offset = dev->devid;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		ret = btrfs_insert_item(trans, root, &key,
					dev_item, sizeof(*dev_item));
		if (ret)
			break;
	}

	free(dev_item);
	return ret;
}
static int __rebuild_chunk_items(struct btrfs_trans_handle *trans,
				 struct recover_control *rc,
				 struct btrfs_root *root)
{
	struct btrfs_key key;
	struct btrfs_chunk *chunk = NULL;
	struct btrfs_root *chunk_root;
	struct chunk_record *chunk_rec;
	int ret;

	chunk_root = root->fs_info->chunk_root;

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		chunk = create_chunk_item(chunk_rec);
		if (!chunk)
			return -ENOMEM;

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(chunk->num_stripes));
		free(chunk);
		if (ret)
			return ret;
	}
	return 0;
}

static int rebuild_chunk_tree(struct btrfs_trans_handle *trans,
			      struct recover_control *rc,
			      struct btrfs_root *root)
{
	int ret = 0;

	root = root->fs_info->chunk_root;

	ret = __rebuild_chunk_root(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_device_items(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_chunk_items(trans, rc, root);

	return ret;
}
static int rebuild_sys_array(struct recover_control *rc,
			     struct btrfs_root *root)
{
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct chunk_record *chunk_rec;
	int ret = 0;
	u16 num_stripes;

	btrfs_set_super_sys_array_size(root->fs_info->super_copy, 0);

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		if (!(chunk_rec->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		num_stripes = chunk_rec->num_stripes;
		chunk = create_chunk_item(chunk_rec);
		if (!chunk) {
			ret = -ENOMEM;
			break;
		}

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_add_system_chunk(NULL, root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
		free(chunk);
		if (ret)
			break;
	}
	return ret;
}
static struct btrfs_root *
open_ctree_with_broken_chunk(struct recover_control *rc)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_super_block *disk_super;
	struct extent_buffer *eb;
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 stripesize;
	int ret;

	fs_info = btrfs_new_fs_info(1, BTRFS_SUPER_INFO_OFFSET);
	if (!fs_info) {
		fprintf(stderr, "Failed to allocate memory for fs_info\n");
		return ERR_PTR(-ENOMEM);
	}
	fs_info->is_chunk_recover = 1;

	fs_info->fs_devices = rc->fs_devices;
	ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
	if (ret)
		goto out;

	disk_super = fs_info->super_copy;
	ret = btrfs_read_dev_super(fs_info->fs_devices->latest_bdev,
				   disk_super, fs_info->super_bytenr, 1);
	if (ret) {
		fprintf(stderr, "No valid btrfs found\n");
		goto out_devices;
	}

	memcpy(fs_info->fsid, &disk_super->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_fs_compatibility(disk_super, 1);
	if (ret)
		goto out_devices;

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     fs_info->chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	ret = build_device_maps_by_chunk_records(rc, fs_info->chunk_root);
	if (ret)
		goto out_cleanup;

	ret = btrfs_setup_all_roots(fs_info, 0, 0);
	if (ret)
		goto out_failed;

	eb = fs_info->tree_root->node;
	read_extent_buffer(eb, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(eb),
			   BTRFS_UUID_SIZE);

	return fs_info->fs_root;
out_failed:
	btrfs_release_all_roots(fs_info);
out_cleanup:
	btrfs_cleanup_all_caches(fs_info);
out_devices:
	btrfs_close_devices(fs_info->fs_devices);
out:
	btrfs_free_fs_info(fs_info);
	return ERR_PTR(ret);
}
static int recover_prepare(struct recover_control *rc, char *path)
{
	int ret;
	int fd;
	struct btrfs_super_block *sb;
	struct btrfs_fs_devices *fs_devices;

	ret = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s error.\n", path);
		return -1;
	}

	sb = malloc(BTRFS_SUPER_INFO_SIZE);
	if (!sb) {
		fprintf(stderr, "allocating memory for sb failed.\n");
		ret = -ENOMEM;
		goto fail_close_fd;
	}

	ret = btrfs_read_dev_super(fd, sb, BTRFS_SUPER_INFO_OFFSET, 1);
	if (ret) {
		fprintf(stderr, "read super block error\n");
		goto fail_free_sb;
	}

	rc->sectorsize = btrfs_super_sectorsize(sb);
	rc->leafsize = btrfs_super_leafsize(sb);
	rc->generation = btrfs_super_generation(sb);
	rc->chunk_root_generation = btrfs_super_chunk_root_generation(sb);
	rc->csum_size = btrfs_super_csum_size(sb);

	/* if seed, the result of scanning below will be partial */
	if (btrfs_super_flags(sb) & BTRFS_SUPER_FLAG_SEEDING) {
		fprintf(stderr, "this device is a seed device\n");
		ret = -1;
		goto fail_free_sb;
	}

	ret = btrfs_scan_fs_devices(fd, path, &fs_devices, 0, 1, 1);
	if (ret)
		goto fail_free_sb;

	rc->fs_devices = fs_devices;

	if (rc->verbose)
		print_all_devices(&rc->fs_devices->devices);

fail_free_sb:
	free(sb);
fail_close_fd:
	close(fd);
	return ret;
}
static int btrfs_get_device_extents(u64 chunk_object,
				    struct list_head *orphan_devexts,
				    struct list_head *ret_list)
{
	struct device_extent_record *devext;
	struct device_extent_record *next;
	int count = 0;

	list_for_each_entry_safe(devext, next, orphan_devexts, chunk_list) {
		if (devext->chunk_offset == chunk_object) {
			list_move_tail(&devext->chunk_list, ret_list);
			count++;
		}
	}
	return count;
}
static int calc_num_stripes(u64 type)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		return 0;	/* Stripe count varies for striped profiles. */
	else if (type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 2;
	else
		return 1;
}

static inline int calc_sub_nstripes(u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID10)
		return 2;
	else
		return 1;
}
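/*
 * Illustrative example, not part of the original file: for a hypothetical
 * RAID1 block group, calc_num_stripes() above is expected to return 2 and
 * calc_sub_nstripes() 1, while for a RAID10 block group the stripe count
 * cannot be derived from the flags alone (0 is returned, meaning "do not
 * check") and the sub-stripe count is 2.
 */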
static int btrfs_verify_device_extents(struct block_group_record *bg,
				       struct list_head *devexts, int ndevexts)
{
	struct device_extent_record *devext;
	u64 stripe_length;
	int expected_num_stripes;

	expected_num_stripes = calc_num_stripes(bg->flags);
	if (expected_num_stripes && expected_num_stripes != ndevexts)
		return 1;

	stripe_length = calc_stripe_length(bg->flags, bg->offset, ndevexts);
	list_for_each_entry(devext, devexts, chunk_list) {
		if (devext->length != stripe_length)
			return 1;
	}
	return 0;
}
static int btrfs_rebuild_unordered_chunk_stripes(struct recover_control *rc,
						 struct chunk_record *chunk)
{
	struct device_extent_record *devext;
	struct btrfs_device *device;
	int i;

	devext = list_first_entry(&chunk->dextents, struct device_extent_record,
				  chunk_list);
	for (i = 0; i < chunk->num_stripes; i++) {
		chunk->stripes[i].devid = devext->objectid;
		chunk->stripes[i].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device)
			return -ENOENT;
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[i].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
		devext = list_next_entry(devext, chunk_list);
	}
	return 0;
}
static int btrfs_calc_stripe_index(struct chunk_record *chunk, u64 logical)
{
	u64 offset = logical - chunk->offset;
	u64 stripe_nr;
	int index;
	int nr_data_stripes;

	stripe_nr = offset / chunk->stripe_len;
	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID0) {
		index = stripe_nr % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID10) {
		index = stripe_nr % (chunk->num_stripes / chunk->sub_stripes);
		index *= chunk->sub_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5) {
		nr_data_stripes = chunk->num_stripes - 1;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6) {
		nr_data_stripes = chunk->num_stripes - 2;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else {
		return -1;
	}
	return index;
}
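/*
 * Illustrative sketch, not part of the original file: a worked example of the
 * index calculation above, under the assumption of a hypothetical RAID10
 * chunk at logical offset 0 with num_stripes = 4, sub_stripes = 2 and
 * stripe_len = 64KiB.  A logical address 3 * 64KiB past the chunk start gives
 * stripe_nr = 3, so index = (3 % (4 / 2)) * 2 = 2, i.e. the third stripe.
 */
#if 0
static void example_calc_stripe_index(void)
{
	struct chunk_record chunk = {
		.offset = 0,
		.stripe_len = 65536,
		.num_stripes = 4,
		.sub_stripes = 2,
		.type_flags = BTRFS_BLOCK_GROUP_RAID10,
	};

	/* Expected to yield 2 for the hypothetical layout described above. */
	int index = btrfs_calc_stripe_index(&chunk, 3 * 65536);
	(void)index;
}
#endif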
/* Calculate the logical offset which is the start of the next stripe. */
static inline u64 btrfs_next_stripe_logical_offset(struct chunk_record *chunk,
						   u64 logical)
{
	u64 offset = logical - chunk->offset;

	offset /= chunk->stripe_len;
	offset *= chunk->stripe_len;
	offset += chunk->stripe_len;
	return offset + chunk->offset;
}
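/*
 * Worked example (illustrative, not part of the original file): with a
 * hypothetical chunk at logical offset 0 and stripe_len = 64KiB, a logical
 * address of 70000 lies inside stripe 1, so the helper above rounds it up to
 * 2 * 65536 = 131072, the first byte of the following stripe.
 */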
static int is_extent_record_in_device_extent(struct extent_record *er,
					     struct device_extent_record *dext,
					     int *mirror)
{
	int i;

	for (i = 0; i < er->nmirrors; i++) {
		if (er->devices[i]->devid == dext->objectid &&
		    er->offsets[i] >= dext->offset &&
		    er->offsets[i] < dext->offset + dext->length) {
			*mirror = i;
			return 1;
		}
	}
	return 0;
}
static int
btrfs_rebuild_ordered_meta_chunk_stripes(struct recover_control *rc,
					 struct chunk_record *chunk)
{
	u64 start = chunk->offset;
	u64 end = chunk->offset + chunk->length;
	struct cache_extent *cache;
	struct extent_record *er;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *device;
	LIST_HEAD(devexts);
	int index;
	int mirror;
	int ret;

	cache = lookup_cache_extent(&rc->eb_cache,
				    start, chunk->length);
	if (!cache) {
		/* No used space, we can reorder the stripes freely. */
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);
		return ret;
	}

	list_splice_init(&chunk->dextents, &devexts);
again:
	er = container_of(cache, struct extent_record, cache);
	index = btrfs_calc_stripe_index(chunk, er->cache.start);
	BUG_ON(index == -1);
	if (chunk->stripes[index].devid)
		goto next;
	list_for_each_entry_safe(devext, next, &devexts, chunk_list) {
		if (is_extent_record_in_device_extent(er, devext, &mirror)) {
			chunk->stripes[index].devid = devext->objectid;
			chunk->stripes[index].offset = devext->offset;
			memcpy(chunk->stripes[index].dev_uuid,
			       er->devices[mirror]->uuid,
			       BTRFS_UUID_SIZE);
			list_move(&devext->chunk_list, &chunk->dextents);
			break;
		}
	}
next:
	start = btrfs_next_stripe_logical_offset(chunk, er->cache.start);
	if (start >= end)
		goto no_extent_record;

	cache = lookup_cache_extent(&rc->eb_cache, start, end - start);
	if (cache)
		goto again;
no_extent_record:
	if (list_empty(&devexts))
		return 0;

	if (chunk->type_flags & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
		/* Fixme: try to recover the order by the parity block. */
		list_splice_tail(&devexts, &chunk->dextents);
		return -EINVAL;
	}

	/* There is no data on the lost stripes, we can reorder them freely. */
	for (index = 0; index < chunk->num_stripes; index++) {
		if (chunk->stripes[index].devid)
			continue;

		devext = list_first_entry(&devexts,
					  struct device_extent_record,
					  chunk_list);
		list_move(&devext->chunk_list, &chunk->dextents);

		chunk->stripes[index].devid = devext->objectid;
		chunk->stripes[index].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device) {
			list_splice_tail(&devexts, &chunk->dextents);
			return -EINVAL;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[index].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
	}
	return 0;
}
#define BTRFS_ORDERED_RAID	(BTRFS_BLOCK_GROUP_RAID0 |	\
				 BTRFS_BLOCK_GROUP_RAID10 |	\
				 BTRFS_BLOCK_GROUP_RAID5 |	\
				 BTRFS_BLOCK_GROUP_RAID6)

static int btrfs_rebuild_chunk_stripes(struct recover_control *rc,
				       struct chunk_record *chunk)
{
	int ret;

	/*
	 * All the data in the system metadata chunk will be dropped,
	 * so we need not guarantee that the data is right or not, that
	 * is, we can reorder the stripes in the system metadata chunk.
	 */
	if ((chunk->type_flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = btrfs_rebuild_ordered_meta_chunk_stripes(rc, chunk);
	else if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA) &&
		 (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = 1;	/* Be handled after the fs is opened. */
	else
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);

	return ret;
}
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
			+ (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			   (unsigned long)csum_item, csum_size);
	return ret;
}
static u64 calc_data_offset(struct btrfs_key *key,
			    struct chunk_record *chunk,
			    u64 dev_offset, u64 csum_offset, u32 blocksize)
{
	u64 data_offset;
	int logical_stripe_nr;
	int dev_stripe_nr;
	int nr_data_stripes;

	data_offset = key->offset + csum_offset * blocksize - chunk->offset;
	nr_data_stripes = chunk->num_stripes;

	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
		nr_data_stripes -= 1;
	else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
		nr_data_stripes -= 2;

	logical_stripe_nr = data_offset / chunk->stripe_len;
	dev_stripe_nr = logical_stripe_nr / nr_data_stripes;

	data_offset -= logical_stripe_nr * chunk->stripe_len;
	data_offset += dev_stripe_nr * chunk->stripe_len;

	return dev_offset + data_offset;
}
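/*
 * Worked example (illustrative, not part of the original file): assume a
 * hypothetical RAID5 chunk at logical offset 0 with num_stripes = 3 (so two
 * data stripes per row) and stripe_len = 64KiB.  A csum covering logical byte
 * 5 * 64KiB gives logical_stripe_nr = 5 and dev_stripe_nr = 5 / 2 = 2, so the
 * data is expected 2 * 64KiB = 128KiB into the matching device extent, i.e.
 * the function returns dev_offset + 131072.
 */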
static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	char *data;
	int ret = 0;
	u32 csum_result = ~(u32)0;

	data = malloc(len);
	if (!data)
		return -1;
	ret = pread64(fd, data, len, start);
	if (ret < 0 || ret != len) {
		ret = -1;
		goto out;
	}
	ret = 0;
	csum_result = btrfs_csum_data(NULL, data, csum_result, len);
	btrfs_csum_final(csum_result, (char *)&csum_result);
	if (csum_result != tree_csum)
		ret = 1;
out:
	free(data);
	return ret;
}
static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
			   struct extent_buffer *leaf, int slot)
{
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 offset = btrfs_item_size_nr(leaf, slot);

	offset /= csum_size;
	offset *= blocksize;
	offset += key->offset;

	return offset;
}
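/*
 * Worked example (illustrative, not part of the original file): with 4-byte
 * crc32c checksums and a 4KiB sectorsize, a csum item whose item size is
 * 64 bytes holds 16 checksums, so it covers 16 * 4096 = 65536 bytes and the
 * helper above returns key->offset + 64KiB as the end of the covered range.
 */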
static int insert_stripe(struct list_head *devexts,
			 struct recover_control *rc,
			 struct chunk_record *chunk,
			 int index)
{
	struct device_extent_record *devext;
	struct btrfs_device *dev;

	devext = list_entry(devexts->next, struct device_extent_record,
			    chunk_list);
	dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					 0);
	if (!dev)
		return -ENOENT;
	BUG_ON(btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					  1));

	chunk->stripes[index].devid = devext->objectid;
	chunk->stripes[index].offset = devext->offset;
	memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);

	list_move(&devext->chunk_list, &chunk->dextents);

	return 0;
}
static inline int count_devext_records(struct list_head *record_list)
{
	int num_of_records = 0;
	struct device_extent_record *devext;

	list_for_each_entry(devext, record_list, chunk_list)
		num_of_records++;

	return num_of_records;
}

static int fill_chunk_up(struct chunk_record *chunk, struct list_head *devexts,
			 struct recover_control *rc)
{
	int ret = 0;
	int i;

	for (i = 0; i < chunk->num_stripes; i++) {
		if (!chunk->stripes[i].devid) {
			ret = insert_stripe(devexts, rc, chunk, i);
			if (ret)
				break;
		}
	}

	return ret;
}

#define EQUAL_STRIPE (1 << 0)
static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	u64 start = chunk->offset;
	u64 end = start + chunk->stripe_len;
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;
	u64 data_offset;
	u32 blocksize = root->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);
	LIST_HEAD(candidates);
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
								> start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto next_stripe;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			if (key.offset >= end)
				goto next_stripe;
			else
				goto next_csum;
		}

		leaf = path.nodes[0];
		slot = path.slots[0];

		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		} else {
			if (key.offset >= end)
				goto next_stripe;
		}

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto out;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		num_unordered = count_devext_records(&unordered);
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
					&& num_unordered == 2) {
			btrfs_release_path(&path);
			ret = fill_chunk_up(chunk, &unordered, rc);
			return ret;
		}

		goto next_stripe;
	}

	if (list_is_last(candidates.next, &candidates)) {
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		BUG_ON(index == -1);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	list_splice_init(&candidates, &unordered);
	num_unordered = count_devext_records(&unordered);
	if (num_unordered == 1) {
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else if ((num_unordered == 2 && chunk->type_flags
					& BTRFS_BLOCK_GROUP_RAID5)
		|| (num_unordered == 3 && chunk->type_flags
					& BTRFS_BLOCK_GROUP_RAID6)) {
		ret = fill_chunk_up(chunk, &unordered, rc);
	}
fail_out:
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
						    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	int ret = 0;
	int err;
	u8 flags;

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
		 && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
			flags = 0;
			err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
							      &flags);
			if (err) {
				list_move(&chunk->list, &rc->bad_chunks);
				if (flags & EQUAL_STRIPE)
					fprintf(stderr,
						"Failure: too many equal stripes in chunk[%llu %llu]\n",
						chunk->offset, chunk->length);
				if (!ret)
					ret = err;
			} else {
				list_move(&chunk->list, &rc->good_chunks);
			}
		}
	}
	return ret;
}
static int btrfs_recover_chunks(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct block_group_record *next;
	LIST_HEAD(new_chunks);
	LIST_HEAD(devexts);
	int nstripes;
	int ret = 0;

	/* create the chunk by block group */
	list_for_each_entry_safe(bg, next, &rc->bg.block_groups, list) {
		nstripes = btrfs_get_device_extents(bg->objectid,
						    &rc->devext.no_chunk_orphans,
						    &devexts);
		chunk = malloc(btrfs_chunk_record_size(nstripes));
		if (!chunk)
			return -ENOMEM;
		memset(chunk, 0, btrfs_chunk_record_size(nstripes));
		INIT_LIST_HEAD(&chunk->dextents);
		chunk->bg_rec = bg;
		chunk->cache.start = bg->objectid;
		chunk->cache.size = bg->offset;
		chunk->objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		chunk->type = BTRFS_CHUNK_ITEM_KEY;
		chunk->offset = bg->objectid;
		chunk->generation = bg->generation;
		chunk->length = bg->offset;
		chunk->owner = BTRFS_CHUNK_TREE_OBJECTID;
		chunk->stripe_len = BTRFS_STRIPE_LEN;
		chunk->type_flags = bg->flags;
		chunk->io_width = BTRFS_STRIPE_LEN;
		chunk->io_align = BTRFS_STRIPE_LEN;
		chunk->sector_size = rc->sectorsize;
		chunk->sub_stripes = calc_sub_nstripes(bg->flags);

		ret = insert_cache_extent(&rc->chunk, &chunk->cache);
		BUG_ON(ret);

		if (!nstripes) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		list_splice_init(&devexts, &chunk->dextents);

		ret = btrfs_verify_device_extents(bg, &devexts, nstripes);
		if (ret) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		chunk->num_stripes = nstripes;
		ret = btrfs_rebuild_chunk_stripes(rc, chunk);
		if (ret > 0)
			list_add_tail(&chunk->list, &rc->unrepaired_chunks);
		else if (ret < 0)
			list_add_tail(&chunk->list, &rc->bad_chunks);
		else
			list_add_tail(&chunk->list, &rc->good_chunks);
	}
	/*
	 * Don't worry about the lost orphan device extents, they don't
	 * have their chunk and block group, so they must be old ones that
	 * have already been dropped.
	 */
	return 0;
}
2099 int btrfs_recover_chunk_tree(char *path
, int verbose
, int yes
)
2102 struct btrfs_root
*root
= NULL
;
2103 struct btrfs_trans_handle
*trans
;
2104 struct recover_control rc
;
2106 init_recover_control(&rc
, verbose
, yes
);
2108 ret
= recover_prepare(&rc
, path
);
2110 fprintf(stderr
, "recover prepare error\n");
2114 ret
= scan_devices(&rc
);
2116 fprintf(stderr
, "scan chunk headers error\n");
2120 if (cache_tree_empty(&rc
.chunk
) &&
2121 cache_tree_empty(&rc
.bg
.tree
) &&
2122 cache_tree_empty(&rc
.devext
.tree
)) {
2123 fprintf(stderr
, "no recoverable chunk\n");
2127 print_scan_result(&rc
);
2129 ret
= check_chunks(&rc
.chunk
, &rc
.bg
, &rc
.devext
, &rc
.good_chunks
,
2131 print_check_result(&rc
);
2133 if (!list_empty(&rc
.bg
.block_groups
) ||
2134 !list_empty(&rc
.devext
.no_chunk_orphans
)) {
2135 ret
= btrfs_recover_chunks(&rc
);
2140 * If the chunk is healthy, its block group item and device
2141 * extent item should be written on the disks. So, it is very
2142 * likely that the bad chunk is a old one that has been
2143 * droppped from the fs. Don't deal with them now, we will
2144 * check it after the fs is opened.
2147 fprintf(stderr
, "Check chunks successfully with no orphans\n");
2151 root
= open_ctree_with_broken_chunk(&rc
);
2153 fprintf(stderr
, "open with broken chunk error\n");
2154 ret
= PTR_ERR(root
);
2158 ret
= check_all_chunks_by_metadata(&rc
, root
);
2160 fprintf(stderr
, "The chunks in memory can not match the metadata of the fs. Repair failed.\n");
2161 goto fail_close_ctree
;
2164 ret
= btrfs_rebuild_ordered_data_chunk_stripes(&rc
, root
);
2166 fprintf(stderr
, "Failed to rebuild ordered chunk stripes.\n");
2167 goto fail_close_ctree
;
2171 ret
= ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
2174 goto fail_close_ctree
;
2178 trans
= btrfs_start_transaction(root
, 1);
2179 ret
= remove_chunk_extent_item(trans
, &rc
, root
);
2182 ret
= rebuild_chunk_tree(trans
, &rc
, root
);
2185 ret
= rebuild_sys_array(&rc
, root
);
2188 btrfs_commit_transaction(trans
, root
);
2192 free_recover_control(&rc
);