/*
 * Copyright (C) 2013 FUJITSU LIMITED.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include <pthread.h>

#include "kerncompat.h"
#include "ctree.h"
#include "radix-tree.h"
#include "extent-cache.h"
#include "transaction.h"
struct recover_control {
	int verbose;
	int yes;

	u16 csum_size;
	u32 sectorsize;
	u32 leafsize;
	u64 generation;
	u64 chunk_root_generation;

	struct btrfs_fs_devices *fs_devices;

	struct cache_tree chunk;
	struct block_group_tree bg;
	struct device_extent_tree devext;
	struct cache_tree eb_cache;

	struct list_head good_chunks;
	struct list_head bad_chunks;
	struct list_head rebuild_chunks;
	struct list_head unrepaired_chunks;
	pthread_mutex_t rc_lock;
};
struct extent_record {
	struct cache_extent cache;
	u64 generation;
	u8 csum[BTRFS_CSUM_SIZE];
	struct btrfs_device *devices[BTRFS_MAX_MIRRORS];
	u64 offsets[BTRFS_MAX_MIRRORS];
	int nmirrors;
};
struct device_scan {
	struct recover_control *rc;
	struct btrfs_device *dev;
	int fd;
};
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
{
	struct extent_record *rec;

	rec = malloc(sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Failed to allocate memory for extent record.\n");
		exit(1);
	}

	memset(rec, 0, sizeof(*rec));
	rec->cache.start = btrfs_header_bytenr(eb);
	rec->cache.size = eb->len;
	rec->generation = btrfs_header_generation(eb);
	read_extent_buffer(eb, rec->csum, (unsigned long)btrfs_header_csum(eb),
			   BTRFS_CSUM_SIZE);
	return rec;
}
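/*
 * Tree blocks found by the raw device scan are deduplicated here by their
 * logical bytenr: only the copy with the highest generation survives, and
 * further copies of the same generation are recorded as mirrors (up to
 * BTRFS_MAX_MIRRORS) so the stripe order can be recovered later.
 */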
static int process_extent_buffer(struct cache_tree *eb_cache,
				 struct extent_buffer *eb,
				 struct btrfs_device *device, u64 offset)
{
	struct extent_record *rec;
	struct extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_extent_record(eb);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(eb_cache,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct extent_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			if (exist->cache.start != rec->cache.start ||
			    exist->cache.size != rec->cache.size ||
			    memcmp(exist->csum, rec->csum, BTRFS_CSUM_SIZE)) {
				ret = -EEXIST;
			} else {
				BUG_ON(exist->nmirrors >= BTRFS_MAX_MIRRORS);
				exist->devices[exist->nmirrors] = device;
				exist->offsets[exist->nmirrors] = offset;
				exist->nmirrors++;
			}
			goto free_out;
		}
		remove_cache_extent(eb_cache, cache);
		free(exist);
		goto again;
	}

	rec->devices[0] = device;
	rec->offsets[0] = offset;
	rec->nmirrors++;
	ret = insert_cache_extent(eb_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void free_extent_record(struct cache_extent *cache)
{
	struct extent_record *er;

	er = container_of(cache, struct extent_record, cache);
	free(er);
}

FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record);
static struct btrfs_chunk *create_chunk_item(struct chunk_record *record)
{
	struct btrfs_chunk *ret;
	struct btrfs_stripe *chunk_stripe;
	int i;

	if (!record || record->num_stripes == 0)
		return NULL;
	ret = malloc(btrfs_chunk_item_size(record->num_stripes));
	if (!ret)
		return NULL;
	btrfs_set_stack_chunk_length(ret, record->length);
	btrfs_set_stack_chunk_owner(ret, record->owner);
	btrfs_set_stack_chunk_stripe_len(ret, record->stripe_len);
	btrfs_set_stack_chunk_type(ret, record->type_flags);
	btrfs_set_stack_chunk_io_align(ret, record->io_align);
	btrfs_set_stack_chunk_io_width(ret, record->io_width);
	btrfs_set_stack_chunk_sector_size(ret, record->sector_size);
	btrfs_set_stack_chunk_num_stripes(ret, record->num_stripes);
	btrfs_set_stack_chunk_sub_stripes(ret, record->sub_stripes);
	for (i = 0, chunk_stripe = &ret->stripe; i < record->num_stripes;
	     i++, chunk_stripe++) {
		btrfs_set_stack_stripe_devid(chunk_stripe,
				record->stripes[i].devid);
		btrfs_set_stack_stripe_offset(chunk_stripe,
				record->stripes[i].offset);
		memcpy(chunk_stripe->dev_uuid, record->stripes[i].dev_uuid,
		       BTRFS_UUID_SIZE);
	}
	return ret;
}
static void init_recover_control(struct recover_control *rc, int verbose,
				 int yes)
{
	memset(rc, 0, sizeof(struct recover_control));
	cache_tree_init(&rc->chunk);
	cache_tree_init(&rc->eb_cache);
	block_group_tree_init(&rc->bg);
	device_extent_tree_init(&rc->devext);

	INIT_LIST_HEAD(&rc->good_chunks);
	INIT_LIST_HEAD(&rc->bad_chunks);
	INIT_LIST_HEAD(&rc->rebuild_chunks);
	INIT_LIST_HEAD(&rc->unrepaired_chunks);

	rc->verbose = verbose;
	rc->yes = yes;
	pthread_mutex_init(&rc->rc_lock, NULL);
}
static void free_recover_control(struct recover_control *rc)
{
	free_block_group_tree(&rc->bg);
	free_chunk_cache_tree(&rc->chunk);
	free_device_extent_tree(&rc->devext);
	free_extent_record_tree(&rc->eb_cache);
	pthread_mutex_destroy(&rc->rc_lock);
}
static int process_block_group_item(struct block_group_tree *bg_cache,
				    struct extent_buffer *leaf,
				    struct btrfs_key *key, int slot)
{
	struct block_group_record *rec;
	struct block_group_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_block_group_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(&bg_cache->tree,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct block_group_record, cache);

		/* Check the generation and replace if needed. */
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct block_group_record,
					      generation);
			/*
			 * According to the current kernel code, the following
			 * case is impossible, or there is something wrong in
			 * the kernel code.
			 */
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&bg_cache->tree, cache);
		list_del_init(&exist->list);
		free(exist);
		/*
		 * We must do the search again to avoid the following case:
		 * /--old bg 1--//--old bg 2--/
		 *        /--new bg--/
		 */
		goto again;
	}

	ret = insert_block_group_record(bg_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_chunk_item(struct cache_tree *chunk_cache,
			      struct extent_buffer *leaf, struct btrfs_key *key,
			      int slot)
{
	struct chunk_record *rec;
	struct chunk_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_chunk_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(chunk_cache, rec->offset, rec->length);
	if (cache) {
		exist = container_of(cache, struct chunk_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int num_stripes = rec->num_stripes;
			int rec_size = btrfs_chunk_record_size(num_stripes);
			int offset = offsetof(struct chunk_record, generation);

			if (exist->num_stripes != rec->num_stripes ||
			    memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   rec_size - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(chunk_cache, cache);
		free(exist);
		goto again;
	}
	ret = insert_cache_extent(chunk_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_device_extent_item(struct device_extent_tree *devext_cache,
				      struct extent_buffer *leaf,
				      struct btrfs_key *key, int slot)
{
	struct device_extent_record *rec;
	struct device_extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_device_extent_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent2(&devext_cache->tree,
				     rec->cache.objectid,
				     rec->cache.start,
				     rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct device_extent_record, cache);
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct device_extent_record,
					      generation);
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&devext_cache->tree, cache);
		list_del_init(&exist->chunk_list);
		list_del_init(&exist->device_list);
		free(exist);
		goto again;
	}

	ret = insert_device_extent_record(devext_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void print_block_group_info(struct block_group_record *rec, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Block Group: start = %llu, len = %llu, flag = %llx\n",
	       rec->objectid, rec->offset, rec->flags);
}
static void print_block_group_tree(struct block_group_tree *tree)
{
	struct cache_extent *cache;
	struct block_group_record *rec;

	printf("All Block Groups:\n");
	for (cache = first_cache_extent(&tree->tree); cache;
	     cache = next_cache_extent(cache)) {
		rec = container_of(cache, struct block_group_record, cache);
		print_block_group_info(rec, "\t");
	}
	printf("\n");
}
static void print_stripe_info(struct stripe *data, char *prefix1, char *prefix2,
			      int index)
{
	if (prefix1)
		printf("%s", prefix1);
	if (prefix2)
		printf("%s", prefix2);
	printf("[%2d] Stripe: devid = %llu, offset = %llu\n",
	       index, data->devid, data->offset);
}
static void print_chunk_self_info(struct chunk_record *rec, char *prefix)
{
	int i;

	if (prefix)
		printf("%s", prefix);
	printf("Chunk: start = %llu, len = %llu, type = %llx, num_stripes = %u\n",
	       rec->offset, rec->length, rec->type_flags, rec->num_stripes);
	if (prefix)
		printf("%s", prefix);
	printf("    Stripes list:\n");
	for (i = 0; i < rec->num_stripes; i++)
		print_stripe_info(&rec->stripes[i], prefix, "    ", i);
}
static void print_chunk_tree(struct cache_tree *tree)
{
	struct cache_extent *n;
	struct chunk_record *entry;

	printf("All Chunks:\n");
	for (n = first_cache_extent(tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct chunk_record, cache);
		print_chunk_self_info(entry, "\t");
	}
	printf("\n");
}
static void print_device_extent_info(struct device_extent_record *rec,
				     char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device extent: devid = %llu, start = %llu, len = %llu, chunk offset = %llu\n",
	       rec->objectid, rec->offset, rec->length, rec->chunk_offset);
}
static void print_device_extent_tree(struct device_extent_tree *tree)
{
	struct cache_extent *n;
	struct device_extent_record *entry;

	printf("All Device Extents:\n");
	for (n = first_cache_extent(&tree->tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct device_extent_record, cache);
		print_device_extent_info(entry, "\t");
	}
	printf("\n");
}
static void print_device_info(struct btrfs_device *device, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device: id = %llu, name = %s\n",
	       device->devid, device->name);
}
static void print_all_devices(struct list_head *devices)
{
	struct btrfs_device *dev;

	printf("All Devices:\n");
	list_for_each_entry(dev, devices, dev_list)
		print_device_info(dev, "\t");
	printf("\n");
}
static void print_scan_result(struct recover_control *rc)
{
	if (!rc->verbose)
		return;

	printf("DEVICE SCAN RESULT:\n");
	printf("Filesystem Information:\n");
	printf("\tsectorsize: %d\n", rc->sectorsize);
	printf("\tleafsize: %d\n", rc->leafsize);
	printf("\ttree root generation: %llu\n", rc->generation);
	printf("\tchunk root generation: %llu\n", rc->chunk_root_generation);
	printf("\n");
	print_all_devices(&rc->fs_devices->devices);
	print_block_group_tree(&rc->bg);
	print_chunk_tree(&rc->chunk);
	print_device_extent_tree(&rc->devext);
}
static void print_chunk_info(struct chunk_record *chunk, char *prefix)
{
	struct device_extent_record *devext;
	int i;

	print_chunk_self_info(chunk, prefix);
	if (prefix)
		printf("%s", prefix);
	if (chunk->bg_rec)
		print_block_group_info(chunk->bg_rec, "    ");
	else
		printf("    No block group.\n");
	if (prefix)
		printf("%s", prefix);
	if (list_empty(&chunk->dextents)) {
		printf("    No device extent.\n");
	} else {
		printf("    Device extent list:\n");
		i = 0;
		list_for_each_entry(devext, &chunk->dextents, chunk_list) {
			if (prefix)
				printf("%s", prefix);
			printf("%s[%2d]", "        ", i);
			print_device_extent_info(devext, NULL);
			i++;
		}
	}
}
static void print_check_result(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct device_extent_record *devext;
	int total = 0;
	int good = 0;
	int bad = 0;

	if (!rc->verbose)
		return;

	printf("CHECK RESULT:\n");
	printf("Recoverable Chunks:\n");
	list_for_each_entry(chunk, &rc->good_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	list_for_each_entry(chunk, &rc->rebuild_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	list_for_each_entry(chunk, &rc->unrepaired_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	printf("Unrecoverable Chunks:\n");
	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		print_chunk_info(chunk, "  ");
		bad++;
		total++;
	}
	printf("\n");
	printf("Total Chunks:\t\t%d\n", total);
	printf("  Recoverable:\t\t%d\n", good);
	printf("  Unrecoverable:\t%d\n", bad);

	printf("\n");
	printf("Orphan Block Groups:\n");
	list_for_each_entry(bg, &rc->bg.block_groups, list)
		print_block_group_info(bg, "  ");

	printf("\n");
	printf("Orphan Device Extents:\n");
	list_for_each_entry(devext, &rc->devext.no_chunk_orphans, chunk_list)
		print_device_extent_info(devext, "  ");
	printf("\n");
}
static int check_chunk_by_metadata(struct recover_control *rc,
				   struct btrfs_root *root,
				   struct chunk_record *chunk, int bg_only)
{
	int ret;
	int i;
	int slot;
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *dev_root;
	struct stripe *stripe;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_block_group_item *bg_ptr;
	struct extent_buffer *l;

	btrfs_init_path(&path);

	if (bg_only)
		goto bg_check;

	dev_root = root->fs_info->dev_root;
	for (i = 0; i < chunk->num_stripes; i++) {
		stripe = &chunk->stripes[i];

		key.objectid = stripe->devid;
		key.offset = stripe->offset;
		key.type = BTRFS_DEV_EXTENT_KEY;

		ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
		if (ret < 0) {
			fprintf(stderr, "Search device extent failed(%d)\n",
				ret);
			btrfs_release_path(&path);
			return ret;
		} else if (ret > 0) {
			if (rc->verbose)
				fprintf(stderr,
					"No device extent[%llu, %llu]\n",
					stripe->devid, stripe->offset);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		l = path.nodes[0];
		slot = path.slots[0];
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		if (chunk->offset !=
		    btrfs_dev_extent_chunk_offset(l, dev_extent)) {
			if (rc->verbose)
				fprintf(stderr,
					"Device tree does not match chunk: dev_extent[%llu, %llu], chunk[%llu, %llu]\n",
					btrfs_dev_extent_chunk_offset(l,
								dev_extent),
					btrfs_dev_extent_length(l, dev_extent),
					chunk->offset, chunk->length);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		btrfs_release_path(&path);
	}

bg_check:
	key.objectid = chunk->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk->length;

	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
				0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search block group failed(%d)\n", ret);
		btrfs_release_path(&path);
		return ret;
	} else if (ret > 0) {
		if (rc->verbose)
			fprintf(stderr, "No block group[%llu, %llu]\n",
				key.objectid, key.offset);
		btrfs_release_path(&path);
		return -ENOENT;
	}

	l = path.nodes[0];
	slot = path.slots[0];
	bg_ptr = btrfs_item_ptr(l, slot, struct btrfs_block_group_item);
	if (chunk->type_flags != btrfs_disk_block_group_flags(l, bg_ptr)) {
		if (rc->verbose)
			fprintf(stderr,
				"Chunk[%llu, %llu]'s type(%llu) is different from the Block Group's type(%llu)\n",
				chunk->offset, chunk->length, chunk->type_flags,
				btrfs_disk_block_group_flags(l, bg_ptr));
		btrfs_release_path(&path);
		return -ENOENT;
	}
	btrfs_release_path(&path);
	return 0;
}
static int check_all_chunks_by_metadata(struct recover_control *rc,
					struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	LIST_HEAD(orphan_chunks);
	int ret = 0;
	int err;

	list_for_each_entry_safe(chunk, next, &rc->good_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 0);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err != -ENOENT && !ret)
			ret = err ? err : -EINVAL;
	}
	list_splice(&orphan_chunks, &rc->bad_chunks);
	return ret;
}
static int extract_metadata_record(struct recover_control *rc,
				   struct extent_buffer *leaf)
{
	struct btrfs_key key;
	int ret = 0;
	int i;
	u32 nritems;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_block_group_item(&rc->bg, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_CHUNK_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_chunk_item(&rc->chunk, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_DEV_EXTENT_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_device_extent_item(&rc->devext, leaf,
							 &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		}
		if (ret)
			break;
	}
	return ret;
}
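/*
 * The superblock copies (at 64KiB, 64MiB and 256GiB, see btrfs_sb_offset())
 * must be skipped by the raw scan below; they are not tree blocks.
 */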
static inline int is_super_block_address(u64 offset)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		if (offset == btrfs_sb_offset(i))
			return 1;
	}
	return 0;
}
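/*
 * Scan a whole device for tree blocks: read leafsize bytes at a time, keep
 * only buffers whose fsid matches this filesystem and whose checksum
 * verifies, then feed them to the shared eb_cache. On a mismatch we only
 * advance by sectorsize, since tree blocks are sector-aligned but not
 * necessarily leafsize-aligned on disk.
 */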
static int scan_one_device(void *dev_scan_struct)
{
	struct extent_buffer *buf;
	u64 bytenr;
	int ret = 0;
	struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
	struct recover_control *rc = dev_scan->rc;
	struct btrfs_device *device = dev_scan->dev;
	int fd = dev_scan->fd;
	int oldtype;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
	if (ret)
		return 1;

	buf = malloc(sizeof(*buf) + rc->leafsize);
	if (!buf)
		return -ENOMEM;
	buf->len = rc->leafsize;

	bytenr = 0;
	while (1) {
		if (is_super_block_address(bytenr))
			bytenr += rc->sectorsize;

		if (pread64(fd, buf->data, rc->leafsize, bytenr) <
		    rc->leafsize)
			break;

		if (memcmp_extent_buffer(buf, rc->fs_devices->fsid,
					 btrfs_header_fsid(),
					 BTRFS_FSID_SIZE)) {
			bytenr += rc->sectorsize;
			continue;
		}

		if (verify_tree_block_csum_silent(buf, rc->csum_size)) {
			bytenr += rc->sectorsize;
			continue;
		}

		pthread_mutex_lock(&rc->rc_lock);
		ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
		pthread_mutex_unlock(&rc->rc_lock);
		if (ret)
			goto out;

		if (btrfs_header_level(buf) != 0)
			goto next_node;

		switch (btrfs_header_owner(buf)) {
		case BTRFS_EXTENT_TREE_OBJECTID:
		case BTRFS_DEV_TREE_OBJECTID:
			/* different trees use different generations */
			if (btrfs_header_generation(buf) > rc->generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		case BTRFS_CHUNK_TREE_OBJECTID:
			if (btrfs_header_generation(buf) >
			    rc->chunk_root_generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		}
next_node:
		bytenr += rc->leafsize;
	}
out:
	close(fd);
	free(buf);
	return ret;
}
static int scan_devices(struct recover_control *rc)
{
	int ret = 0;
	int fd;
	struct btrfs_device *dev;
	struct device_scan *dev_scans;
	pthread_t *t_scans;
	int *t_rets;
	int devnr = 0;
	int devidx = 0;
	int cancel_from = 0;
	int cancel_to = 0;
	int i;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
		devnr++;
	dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
						 * devnr);
	if (!dev_scans)
		return -ENOMEM;
	t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
	if (!t_scans)
		return -ENOMEM;
	t_rets = (int *)malloc(sizeof(int) * devnr);
	if (!t_rets)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		fd = open(dev->name, O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "Failed to open device %s\n",
				dev->name);
			ret = 1;
			goto out2;
		}
		dev_scans[devidx].rc = rc;
		dev_scans[devidx].dev = dev;
		dev_scans[devidx].fd = fd;
		ret = pthread_create(&t_scans[devidx], NULL,
				     (void *)scan_one_device,
				     (void *)&dev_scans[devidx]);
		if (ret) {
			cancel_from = 0;
			cancel_to = devidx - 1;
			goto out1;
		}
		devidx++;
	}

	for (i = 0; i < devnr; i++) {
		ret = pthread_join(t_scans[i], (void **)&t_rets[i]);
		if (ret || t_rets[i]) {
			ret = 1;
			cancel_from = i + 1;
			cancel_to = devnr - 1;
			break;
		}
	}
out1:
	while (ret && (cancel_from <= cancel_to)) {
		pthread_cancel(t_scans[cancel_from]);
		cancel_from++;
	}
out2:
	free(dev_scans);
	free(t_scans);
	free(t_rets);
	return !!ret;
}
static int build_device_map_by_chunk_record(struct btrfs_root *root,
					    struct chunk_record *chunk)
{
	int ret = 0;
	int i;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u16 num_stripes;
	struct btrfs_mapping_tree *map_tree;
	struct map_lookup *map;
	struct stripe *stripe;

	map_tree = &root->fs_info->mapping_tree;
	num_stripes = chunk->num_stripes;
	map = malloc(btrfs_map_lookup_size(num_stripes));
	if (!map)
		return -ENOMEM;
	map->ce.start = chunk->offset;
	map->ce.size = chunk->length;
	map->num_stripes = num_stripes;
	map->io_width = chunk->io_width;
	map->io_align = chunk->io_align;
	map->sector_size = chunk->sector_size;
	map->stripe_len = chunk->stripe_len;
	map->type = chunk->type_flags;
	map->sub_stripes = chunk->sub_stripes;

	for (i = 0, stripe = chunk->stripes; i < num_stripes; i++, stripe++) {
		devid = stripe->devid;
		memcpy(uuid, stripe->dev_uuid, BTRFS_UUID_SIZE);
		map->stripes[i].physical = stripe->offset;
		map->stripes[i].dev = btrfs_find_device(root, devid,
							uuid, NULL);
		if (!map->stripes[i].dev) {
			free(map);
			return -EIO;
		}
	}

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	return ret;
}
static int build_device_maps_by_chunk_records(struct recover_control *rc,
					      struct btrfs_root *root)
{
	int ret = 0;
	struct chunk_record *chunk;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	list_for_each_entry(chunk, &rc->rebuild_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	return ret;
}
static int block_group_remove_all_extent_items(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct block_group_record *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	u64 start = bg->objectid;
	u64 end = bg->objectid + bg->offset;
	u64 old_val;
	int nitems;
	int ret;
	int i;
	int del_s, del_nr;

	btrfs_init_path(&path);
	root = root->fs_info->extent_root;

	key.objectid = start;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
	if (ret < 0)
		goto err;
	else if (ret > 0)
		ret = 0;

	leaf = path.nodes[0];
	nitems = btrfs_header_nritems(leaf);
	if (!nitems) {
		/* The tree is empty. */
		ret = 0;
		goto done;
	}

	if (path.slots[0] >= nitems) {
		ret = btrfs_next_leaf(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0) {
			ret = 0;
			goto done;
		}
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, 0);
		if (key.objectid >= end)
			goto done;
		btrfs_release_path(&path);
		goto again;
	}

	del_nr = 0;
	del_s = -1;
	for (i = path.slots[0]; i < nitems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid >= end)
			break;

		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (del_nr == 0)
				continue;
			else
				break;
		}

		if (del_s == -1)
			del_s = i;
		del_nr++;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			old_val = btrfs_super_bytes_used(fs_info->super_copy);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				old_val += root->leafsize;
			else
				old_val += key.offset;
			btrfs_set_super_bytes_used(fs_info->super_copy,
						   old_val);
		}
	}

	if (del_nr) {
		ret = btrfs_del_items(trans, root, &path, del_s, del_nr);
		if (ret)
			goto err;
	}

	if (key.objectid < end) {
		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			key.objectid += root->sectorsize;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
		}
		btrfs_release_path(&path);
		goto again;
	}
done:
	btrfs_release_path(&path);
err:
	return ret;
}
static int block_group_free_all_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct block_group_record *bg)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info;
	u64 start;
	u64 end;

	info = root->fs_info;
	cache = btrfs_lookup_block_group(info, bg->objectid);
	if (!cache)
		return -ENOENT;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	set_extent_bits(&info->block_group_cache, start, end,
			BLOCK_GROUP_DIRTY, GFP_NOFS);
	set_extent_dirty(&info->free_space_cache, start, end, GFP_NOFS);

	btrfs_set_block_group_used(&cache->item, 0);

	return 0;
}
static int remove_chunk_extent_item(struct btrfs_trans_handle *trans,
				    struct recover_control *rc,
				    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	int ret = 0;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		if (!(chunk->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;
		ret = block_group_remove_all_extent_items(trans, root,
							  chunk->bg_rec);
		if (ret)
			return ret;

		ret = block_group_free_all_extent(trans, root, chunk->bg_rec);
		if (ret)
			return ret;
	}
	return ret;
}
static int __rebuild_chunk_root(struct btrfs_trans_handle *trans,
				struct recover_control *rc,
				struct btrfs_root *root)
{
	u64 min_devid = -1;
	struct btrfs_device *dev;
	struct extent_buffer *cow;
	struct btrfs_disk_key disk_key;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		if (min_devid > dev->devid)
			min_devid = dev->devid;
	}
	disk_key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	disk_key.type = BTRFS_DEV_ITEM_KEY;
	disk_key.offset = min_devid;

	cow = btrfs_alloc_free_block(trans, root, root->nodesize,
				     BTRFS_CHUNK_TREE_OBJECTID,
				     &disk_key, 0, 0, 0);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_nritems(cow, 0);
	btrfs_set_header_level(cow, 0);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(cow, BTRFS_CHUNK_TREE_OBJECTID);

	write_extent_buffer(cow, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(cow),
			    BTRFS_UUID_SIZE);

	root->node = cow;
	btrfs_mark_buffer_dirty(cow);

	return 0;
}
static int __rebuild_device_items(struct btrfs_trans_handle *trans,
				  struct recover_control *rc,
				  struct btrfs_root *root)
{
	struct btrfs_device *dev;
	struct btrfs_key key;
	struct btrfs_dev_item *dev_item;
	int ret = 0;

	dev_item = malloc(sizeof(struct btrfs_dev_item));
	if (!dev_item)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
		key.type = BTRFS_DEV_ITEM_KEY;
		key.offset = dev->devid;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		ret = btrfs_insert_item(trans, root, &key,
					dev_item, sizeof(*dev_item));
		if (ret)
			break;
	}

	free(dev_item);
	return ret;
}
static int __insert_chunk_item(struct btrfs_trans_handle *trans,
			       struct chunk_record *chunk_rec,
			       struct btrfs_root *chunk_root)
{
	struct btrfs_key key;
	struct btrfs_chunk *chunk = NULL;
	int ret = 0;

	chunk = create_chunk_item(chunk_rec);
	if (!chunk)
		return -ENOMEM;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_rec->offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(chunk->num_stripes));
	free(chunk);
	return ret;
}
static int __rebuild_chunk_items(struct btrfs_trans_handle *trans,
				 struct recover_control *rc,
				 struct btrfs_root *root)
{
	struct btrfs_root *chunk_root;
	struct chunk_record *chunk_rec;
	int ret;

	chunk_root = root->fs_info->chunk_root;

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		ret = __insert_chunk_item(trans, chunk_rec, chunk_root);
		if (ret)
			return ret;
	}
	list_for_each_entry(chunk_rec, &rc->rebuild_chunks, list) {
		ret = __insert_chunk_item(trans, chunk_rec, chunk_root);
		if (ret)
			return ret;
	}
	return 0;
}
static int rebuild_chunk_tree(struct btrfs_trans_handle *trans,
			      struct recover_control *rc,
			      struct btrfs_root *root)
{
	int ret = 0;

	root = root->fs_info->chunk_root;

	ret = __rebuild_chunk_root(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_device_items(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_chunk_items(trans, rc, root);

	return ret;
}
static int rebuild_sys_array(struct recover_control *rc,
			     struct btrfs_root *root)
{
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct chunk_record *chunk_rec;
	int ret = 0;
	u16 num_stripes;

	btrfs_set_super_sys_array_size(root->fs_info->super_copy, 0);

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		if (!(chunk_rec->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		num_stripes = chunk_rec->num_stripes;
		chunk = create_chunk_item(chunk_rec);
		if (!chunk) {
			ret = -ENOMEM;
			break;
		}

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_add_system_chunk(NULL, root, &key, chunk,
					     btrfs_chunk_item_size(num_stripes));
		free(chunk);
		if (ret)
			break;
	}
	return ret;
}
static int calculate_bg_used(struct btrfs_root *extent_root,
			     struct chunk_record *chunk_rec,
			     struct btrfs_path *path,
			     u64 *used)
{
	struct extent_buffer *node;
	struct btrfs_key found_key;
	int slot;
	int ret = 0;
	u64 used_ret = 0;

	while (1) {
		node = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(node, &found_key, slot);
		if (found_key.objectid >= chunk_rec->offset + chunk_rec->length)
			break;
		if (found_key.type != BTRFS_METADATA_ITEM_KEY &&
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (found_key.type == BTRFS_METADATA_ITEM_KEY)
			used_ret += extent_root->nodesize;
		else
			used_ret += found_key.offset;
next:
		if (slot + 1 < btrfs_header_nritems(node)) {
			path->slots[0]++;
		} else {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
		}
	}
out:
	if (!ret)
		*used = used_ret;
	return ret;
}
static int __insert_block_group(struct btrfs_trans_handle *trans,
				struct chunk_record *chunk_rec,
				struct btrfs_root *extent_root,
				u64 used)
{
	struct btrfs_block_group_item bg_item;
	struct btrfs_key key;
	int ret = 0;

	btrfs_set_block_group_used(&bg_item, used);
	/* The chunk objectid of a block group item is always the chunk tree. */
	btrfs_set_block_group_chunk_objectid(&bg_item,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_block_group_flags(&bg_item, chunk_rec->type_flags);
	key.objectid = chunk_rec->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk_rec->length;

	ret = btrfs_insert_item(trans, extent_root, &key, &bg_item,
				sizeof(bg_item));
	return ret;
}
/*
 * Search through the extent tree to rebuild the 'used' member of the block
 * group.
 * However, since block group and extent item share the extent tree, the
 * extent item may also be missing.
 * In that case, we fill the 'used' with the length of the block group to
 * ensure no write into the block group.
 * Btrfsck will hate it, but we will inform the user to call
 * '--init-extent-tree' if possible, or just salvage as much data as
 * possible from the fs.
 */
static int rebuild_block_group(struct btrfs_trans_handle *trans,
			       struct recover_control *rc,
			       struct btrfs_root *root)
{
	struct chunk_record *chunk_rec;
	struct btrfs_key search_key;
	struct btrfs_path *path;
	u64 used = 0;
	int ret = 0;

	if (list_empty(&rc->rebuild_chunks))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	list_for_each_entry(chunk_rec, &rc->rebuild_chunks, list) {
		search_key.objectid = chunk_rec->offset;
		search_key.type = BTRFS_EXTENT_ITEM_KEY;
		search_key.offset = 0;
		ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
					&search_key, path, 0, 0);
		if (ret < 0)
			goto out;
		ret = calculate_bg_used(root->fs_info->extent_root,
					chunk_rec, path, &used);
		/*
		 * Extent tree is damaged, better to rebuild the whole extent
		 * tree. Currently, change the used to chunk's len to prevent
		 * write/block reserve happening in that block group.
		 */
		if (ret < 0) {
			fprintf(stderr,
				"Failed to search extent tree for block group: [%llu,%llu]\n",
				chunk_rec->offset,
				chunk_rec->offset + chunk_rec->length);
			fprintf(stderr,
				"Mark the block group full to prevent block rsv problems\n");
			used = chunk_rec->length;
		}
		btrfs_release_path(path);
		ret = __insert_block_group(trans, chunk_rec,
					   root->fs_info->extent_root,
					   used);
		if (ret < 0)
			goto out;
	}
out:
	btrfs_free_path(path);
	return ret;
}
static struct btrfs_root *
open_ctree_with_broken_chunk(struct recover_control *rc)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_super_block *disk_super;
	struct extent_buffer *eb;
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 stripesize;
	int ret;

	fs_info = btrfs_new_fs_info(1, BTRFS_SUPER_INFO_OFFSET);
	if (!fs_info) {
		fprintf(stderr, "Failed to allocate memory for fs_info\n");
		return ERR_PTR(-ENOMEM);
	}
	fs_info->is_chunk_recover = 1;

	fs_info->fs_devices = rc->fs_devices;
	ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
	if (ret)
		goto out;

	disk_super = fs_info->super_copy;
	ret = btrfs_read_dev_super(fs_info->fs_devices->latest_bdev,
				   disk_super, fs_info->super_bytenr, 1);
	if (ret) {
		fprintf(stderr, "No valid btrfs found\n");
		goto out_devices;
	}

	memcpy(fs_info->fsid, &disk_super->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_fs_compatibility(disk_super, 1);
	if (ret)
		goto out_devices;

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     fs_info->chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	ret = build_device_maps_by_chunk_records(rc, fs_info->chunk_root);
	if (ret)
		goto out_cleanup;

	ret = btrfs_setup_all_roots(fs_info, 0, 0);
	if (ret)
		goto out_failed;

	eb = fs_info->tree_root->node;
	read_extent_buffer(eb, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(eb),
			   BTRFS_UUID_SIZE);

	return fs_info->fs_root;
out_failed:
	btrfs_release_all_roots(fs_info);
out_cleanup:
	btrfs_cleanup_all_caches(fs_info);
out_devices:
	btrfs_close_devices(fs_info->fs_devices);
out:
	btrfs_free_fs_info(fs_info);
	return ERR_PTR(ret);
}
static int recover_prepare(struct recover_control *rc, char *path)
{
	int ret;
	int fd;
	struct btrfs_super_block *sb;
	struct btrfs_fs_devices *fs_devices;

	ret = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s error.\n", path);
		return -1;
	}

	sb = malloc(BTRFS_SUPER_INFO_SIZE);
	if (!sb) {
		fprintf(stderr, "allocating memory for sb failed.\n");
		ret = -ENOMEM;
		goto fail_close_fd;
	}

	ret = btrfs_read_dev_super(fd, sb, BTRFS_SUPER_INFO_OFFSET, 1);
	if (ret) {
		fprintf(stderr, "read super block error\n");
		goto fail_free_sb;
	}

	rc->sectorsize = btrfs_super_sectorsize(sb);
	rc->leafsize = btrfs_super_leafsize(sb);
	rc->generation = btrfs_super_generation(sb);
	rc->chunk_root_generation = btrfs_super_chunk_root_generation(sb);
	rc->csum_size = btrfs_super_csum_size(sb);

	/* If seed, the result of scanning below will be partial. */
	if (btrfs_super_flags(sb) & BTRFS_SUPER_FLAG_SEEDING) {
		fprintf(stderr, "this device is a seed device\n");
		ret = -1;
		goto fail_free_sb;
	}

	ret = btrfs_scan_fs_devices(fd, path, &fs_devices, 0, 1, 0);
	if (ret)
		goto fail_free_sb;

	rc->fs_devices = fs_devices;

	if (rc->verbose)
		print_all_devices(&rc->fs_devices->devices);

fail_free_sb:
	free(sb);
fail_close_fd:
	close(fd);
	return ret;
}
static int btrfs_get_device_extents(u64 chunk_object,
				    struct list_head *orphan_devexts,
				    struct list_head *ret_list)
{
	struct device_extent_record *devext;
	struct device_extent_record *next;
	int count = 0;

	list_for_each_entry_safe(devext, next, orphan_devexts, chunk_list) {
		if (devext->chunk_offset == chunk_object) {
			list_move_tail(&devext->chunk_list, ret_list);
			count++;
		}
	}
	return count;
}
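/*
 * Expected stripe count per profile: RAID0/10/5/6 have no fixed count
 * (return 0 so the caller skips the check), RAID1 and DUP always keep two
 * copies, and everything else (SINGLE) uses one stripe.
 */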
static int calc_num_stripes(u64 type)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		return 0;
	else if (type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 2;
	else
		return 1;
}
static inline int calc_sub_nstripes(u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID10)
		return 2;
	return 1;
}
static int btrfs_verify_device_extents(struct block_group_record *bg,
				       struct list_head *devexts, int ndevexts)
{
	struct device_extent_record *devext;
	u64 stripe_length;
	int expected_num_stripes;

	expected_num_stripes = calc_num_stripes(bg->flags);
	if (expected_num_stripes && expected_num_stripes != ndevexts)
		return 1;

	stripe_length = calc_stripe_length(bg->flags, bg->offset, ndevexts);
	list_for_each_entry(devext, devexts, chunk_list) {
		if (devext->length != stripe_length)
			return 1;
	}
	return 0;
}
static int btrfs_rebuild_unordered_chunk_stripes(struct recover_control *rc,
						 struct chunk_record *chunk)
{
	struct device_extent_record *devext;
	struct btrfs_device *device;
	int i;

	devext = list_first_entry(&chunk->dextents, struct device_extent_record,
				  chunk_list);
	for (i = 0; i < chunk->num_stripes; i++) {
		chunk->stripes[i].devid = devext->objectid;
		chunk->stripes[i].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device)
			return -ENOENT;
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[i].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
		devext = list_next_entry(devext, chunk_list);
	}
	return 0;
}
static int btrfs_calc_stripe_index(struct chunk_record *chunk, u64 logical)
{
	u64 offset = logical - chunk->offset;
	int stripe_nr;
	int nr_data_stripes;
	int index;

	stripe_nr = offset / chunk->stripe_len;
	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID0) {
		index = stripe_nr % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID10) {
		index = stripe_nr % (chunk->num_stripes / chunk->sub_stripes);
		index *= chunk->sub_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5) {
		nr_data_stripes = chunk->num_stripes - 1;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6) {
		nr_data_stripes = chunk->num_stripes - 2;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else {
		return -1;
	}
	return index;
}
/* Calculate the logical offset which is the start of the next stripe. */
static inline u64 btrfs_next_stripe_logical_offset(struct chunk_record *chunk,
						   u64 logical)
{
	u64 offset = logical - chunk->offset;

	offset /= chunk->stripe_len;
	offset *= chunk->stripe_len;
	offset += chunk->stripe_len;

	return offset + chunk->offset;
}
static int is_extent_record_in_device_extent(struct extent_record *er,
					     struct device_extent_record *dext,
					     int *mirror)
{
	int i;

	for (i = 0; i < er->nmirrors; i++) {
		if (er->devices[i]->devid == dext->objectid &&
		    er->offsets[i] >= dext->offset &&
		    er->offsets[i] < dext->offset + dext->length) {
			*mirror = i;
			return 1;
		}
	}
	return 0;
}
static int
btrfs_rebuild_ordered_meta_chunk_stripes(struct recover_control *rc,
					 struct chunk_record *chunk)
{
	u64 start = chunk->offset;
	u64 end = chunk->offset + chunk->length;
	struct cache_extent *cache;
	struct extent_record *er;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *device;
	LIST_HEAD(devexts);
	int index;
	int mirror;
	int ret;

	cache = lookup_cache_extent(&rc->eb_cache,
				    start, chunk->length);
	if (!cache) {
		/* No used space, we can reorder the stripes freely. */
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);
		return ret;
	}

	list_splice_init(&chunk->dextents, &devexts);
again:
	er = container_of(cache, struct extent_record, cache);
	index = btrfs_calc_stripe_index(chunk, er->cache.start);
	BUG_ON(index == -1);
	if (chunk->stripes[index].devid)
		goto next;
	list_for_each_entry_safe(devext, next, &devexts, chunk_list) {
		if (is_extent_record_in_device_extent(er, devext, &mirror)) {
			chunk->stripes[index].devid = devext->objectid;
			chunk->stripes[index].offset = devext->offset;
			memcpy(chunk->stripes[index].dev_uuid,
			       er->devices[mirror]->uuid,
			       BTRFS_UUID_SIZE);
			index++;
			list_move(&devext->chunk_list, &chunk->dextents);
		}
	}
next:
	start = btrfs_next_stripe_logical_offset(chunk, er->cache.start);
	if (start >= end)
		goto no_extent_record;

	cache = lookup_cache_extent(&rc->eb_cache, start, end - start);
	if (cache)
		goto again;
no_extent_record:
	if (list_empty(&devexts))
		return 0;

	if (chunk->type_flags & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
		/* Fixme: try to recover the order by the parity block. */
		list_splice_tail(&devexts, &chunk->dextents);
		return -EINVAL;
	}

	/* There is no data on the lost stripes, we can reorder them freely. */
	for (index = 0; index < chunk->num_stripes; index++) {
		if (chunk->stripes[index].devid)
			continue;

		devext = list_first_entry(&devexts,
					  struct device_extent_record,
					  chunk_list);
		list_move(&devext->chunk_list, &chunk->dextents);

		chunk->stripes[index].devid = devext->objectid;
		chunk->stripes[index].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device) {
			list_splice_tail(&devexts, &chunk->dextents);
			return -EINVAL;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[index].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
	}
	return 0;
}
#define BTRFS_ORDERED_RAID	(BTRFS_BLOCK_GROUP_RAID0 |	\
				 BTRFS_BLOCK_GROUP_RAID10 |	\
				 BTRFS_BLOCK_GROUP_RAID5 |	\
				 BTRFS_BLOCK_GROUP_RAID6)
static int btrfs_rebuild_chunk_stripes(struct recover_control *rc,
				       struct chunk_record *chunk)
{
	int ret;

	/*
	 * All the data in the system metadata chunk will be dropped,
	 * so we need not guarantee that the data is right or not, that
	 * is, we can reorder the stripes in the system metadata chunk.
	 */
	if ((chunk->type_flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = btrfs_rebuild_ordered_meta_chunk_stripes(rc, chunk);
	else if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA) &&
		 (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = 1;	/* Be handled after the fs is opened. */
	else
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);

	return ret;
}
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
					       + (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			   (unsigned long)csum_item, csum_size);
	return ret;
}
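/*
 * Translate a csum-tree logical address into a physical offset within one
 * device extent. For RAID5/6 only the data stripes count: e.g. with
 * 3 stripes (RAID5), stripe_len = 64KiB and a logical offset of 192KiB
 * into the chunk, logical_stripe_nr = 3 and dev_stripe_nr = 3 / 2 = 1, so
 * the block sits 64KiB into the device extent.
 */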
static u64 calc_data_offset(struct btrfs_key *key,
			    struct chunk_record *chunk,
			    u64 dev_offset,
			    u64 csum_offset,
			    u32 blocksize)
{
	u64 data_offset;
	int logical_stripe_nr;
	int dev_stripe_nr;
	int nr_data_stripes;

	data_offset = key->offset + csum_offset * blocksize - chunk->offset;
	nr_data_stripes = chunk->num_stripes;

	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
		nr_data_stripes -= 1;
	else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
		nr_data_stripes -= 2;

	logical_stripe_nr = data_offset / chunk->stripe_len;
	dev_stripe_nr = logical_stripe_nr / nr_data_stripes;

	data_offset -= logical_stripe_nr * chunk->stripe_len;
	data_offset += dev_stripe_nr * chunk->stripe_len;

	return dev_offset + data_offset;
}
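/*
 * Read one data block straight from the device and compare its CRC32C
 * (seeded with ~0, as btrfs does) against the checksum stored in the csum
 * tree: 0 means match, 1 mismatch, -1 I/O error.
 */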
static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	char *data;
	int ret = 0;
	u32 csum_result = ~(u32)0;

	data = malloc(len);
	if (!data)
		return -1;
	ret = pread64(fd, data, len, start);
	if (ret < 0 || ret != len) {
		ret = -1;
		goto out;
	}
	ret = 0;
	csum_result = btrfs_csum_data(NULL, data, csum_result, len);
	btrfs_csum_final(csum_result, (char *)&csum_result);
	if (csum_result != tree_csum)
		ret = 1;
out:
	free(data);
	return ret;
}
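/*
 * A csum item covers item_size / csum_size data blocks; the end offset is
 * therefore key->offset + (item_size / csum_size) * blocksize.
 */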
static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
			   struct extent_buffer *leaf, int slot)
{
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 offset = btrfs_item_size_nr(leaf, slot);

	offset /= csum_size;
	offset *= blocksize;
	offset += key->offset;

	return offset;
}
static int insert_stripe(struct list_head *devexts,
			 struct recover_control *rc,
			 struct chunk_record *chunk,
			 int index)
{
	struct device_extent_record *devext;
	struct btrfs_device *dev;

	devext = list_entry(devexts->next, struct device_extent_record,
			    chunk_list);
	dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					 0);
	if (!dev)
		return -ENOENT;
	BUG_ON(btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					  1));
	chunk->stripes[index].devid = devext->objectid;
	chunk->stripes[index].offset = devext->offset;
	memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);

	list_move(&devext->chunk_list, &chunk->dextents);

	return 0;
}
static inline int count_devext_records(struct list_head *record_list)
{
	int num_of_records = 0;
	struct device_extent_record *devext;

	list_for_each_entry(devext, record_list, chunk_list)
		num_of_records++;

	return num_of_records;
}
static int fill_chunk_up(struct chunk_record *chunk, struct list_head *devexts,
			 struct recover_control *rc)
{
	int ret = 0;
	int i;

	for (i = 0; i < chunk->num_stripes; i++) {
		if (!chunk->stripes[i].devid) {
			ret = insert_stripe(devexts, rc, chunk, i);
			if (ret)
				break;
		}
	}

	return ret;
}
#define EQUAL_STRIPE (1 << 0)
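/*
 * Recover the stripe order of a RAID5/6 data chunk by brute force: for each
 * stripe, fetch checksummed blocks from the csum tree and test which
 * candidate device extent actually contains data matching the checksum.
 * Candidates that fail a check move to the 'unordered' list; when exactly
 * one candidate survives, its stripe index is fixed. EQUAL_STRIPE is
 * reported when several stripes hold identical data, so no unique order
 * can be derived.
 */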
static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;
	u64 start = chunk->offset;
	u64 end = start + chunk->stripe_len;
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;
	u64 data_offset;
	u32 blocksize = root->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);
	LIST_HEAD(candidates);

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
								> start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto next_stripe;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			if (key.offset >= end)
				goto next_stripe;
			goto next_csum;
		}
		leaf = path.nodes[0];
		slot = path.slots[0];

		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		}
		if (key.offset >= end)
			goto next_stripe;

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto out;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		num_unordered = count_devext_records(&unordered);
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
					&& num_unordered == 2) {
			btrfs_release_path(&path);
			ret = fill_chunk_up(chunk, &unordered, rc);
			return ret;
		}

		goto next_stripe;
	}

	if (list_is_last(candidates.next, &candidates)) {
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		BUG_ON(index == -1);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	ret = 0;
	list_splice_init(&candidates, &unordered);
	num_unordered = count_devext_records(&unordered);
	if (num_unordered == 1) {
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else if ((num_unordered == 2 && chunk->type_flags
					  & BTRFS_BLOCK_GROUP_RAID5)
		   || (num_unordered == 3 && chunk->type_flags
					     & BTRFS_BLOCK_GROUP_RAID6)) {
		ret = fill_chunk_up(chunk, &unordered, rc);
	}
fail_out:
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
						    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	int ret = 0;
	int err;
	u8 flags;

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
		    && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
			flags = 0;
			err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
							      &flags);
			if (err) {
				list_move(&chunk->list, &rc->bad_chunks);
				if (flags & EQUAL_STRIPE)
					fprintf(stderr,
			"Failure: too many equal stripes in chunk[%llu %llu]\n",
						chunk->offset, chunk->length);
				if (!ret)
					ret = err;
			} else {
				list_move(&chunk->list, &rc->good_chunks);
			}
		}
	}
	return ret;
}
static int btrfs_recover_chunks(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct block_group_record *next;
	LIST_HEAD(new_chunks);
	LIST_HEAD(devexts);
	int nstripes;
	int ret = 0;

	/* create the chunk by block group */
	list_for_each_entry_safe(bg, next, &rc->bg.block_groups, list) {
		nstripes = btrfs_get_device_extents(bg->objectid,
						    &rc->devext.no_chunk_orphans,
						    &devexts);
		chunk = malloc(btrfs_chunk_record_size(nstripes));
		if (!chunk)
			return -ENOMEM;
		memset(chunk, 0, btrfs_chunk_record_size(nstripes));
		INIT_LIST_HEAD(&chunk->dextents);

		chunk->cache.start = bg->objectid;
		chunk->cache.size = bg->offset;
		chunk->objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		chunk->type = BTRFS_CHUNK_ITEM_KEY;
		chunk->offset = bg->objectid;
		chunk->generation = bg->generation;
		chunk->length = bg->offset;
		chunk->owner = BTRFS_CHUNK_TREE_OBJECTID;
		chunk->stripe_len = BTRFS_STRIPE_LEN;
		chunk->type_flags = bg->flags;
		chunk->io_width = BTRFS_STRIPE_LEN;
		chunk->io_align = BTRFS_STRIPE_LEN;
		chunk->sector_size = rc->sectorsize;
		chunk->sub_stripes = calc_sub_nstripes(bg->flags);

		ret = insert_cache_extent(&rc->chunk, &chunk->cache);
		BUG_ON(ret);

		list_del_init(&bg->list);
		if (!nstripes) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		list_splice_init(&devexts, &chunk->dextents);

		ret = btrfs_verify_device_extents(bg, &devexts, nstripes);
		if (ret) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		chunk->num_stripes = nstripes;
		ret = btrfs_rebuild_chunk_stripes(rc, chunk);
		if (ret > 0)
			list_add_tail(&chunk->list, &rc->unrepaired_chunks);
		else if (ret < 0)
			list_add_tail(&chunk->list, &rc->bad_chunks);
		else
			list_add_tail(&chunk->list, &rc->good_chunks);
	}
	/*
	 * Don't worry about the lost orphan device extents; they don't
	 * have their chunk and block group, they must be the old ones that
	 * we have dropped.
	 */
	return 0;
}
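/*
 * Two chunks overlap unless one ends at or before the start of the other
 * ([offset, offset + length) treated as half-open intervals).
 */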
static inline int is_chunk_overlap(struct chunk_record *chunk1,
				   struct chunk_record *chunk2)
{
	if (chunk1->offset >= chunk2->offset + chunk2->length ||
	    chunk1->offset + chunk1->length <= chunk2->offset)
		return 0;
	return 1;
}
/* Move invalid (overlapping with good chunks) rebuild chunks to the bad chunk list */
static void validate_rebuild_chunks(struct recover_control *rc)
{
	struct chunk_record *good;
	struct chunk_record *rebuild;
	struct chunk_record *tmp;

	list_for_each_entry_safe(rebuild, tmp, &rc->rebuild_chunks, list) {
		list_for_each_entry(good, &rc->good_chunks, list) {
			if (is_chunk_overlap(rebuild, good)) {
				list_move_tail(&rebuild->list,
					       &rc->bad_chunks);
				break;
			}
		}
	}
}
/*
 * Return 0 when successful, < 0 on error and > 0 if aborted by user
 */
int btrfs_recover_chunk_tree(char *path, int verbose, int yes)
{
	int ret = 0;
	struct btrfs_root *root = NULL;
	struct btrfs_trans_handle *trans;
	struct recover_control rc;

	init_recover_control(&rc, verbose, yes);

	ret = recover_prepare(&rc, path);
	if (ret) {
		fprintf(stderr, "recover prepare error\n");
		return ret;
	}

	ret = scan_devices(&rc);
	if (ret) {
		fprintf(stderr, "scan chunk headers error\n");
		goto fail_rc;
	}

	if (cache_tree_empty(&rc.chunk) &&
	    cache_tree_empty(&rc.bg.tree) &&
	    cache_tree_empty(&rc.devext.tree)) {
		fprintf(stderr, "no recoverable chunk\n");
		goto fail_rc;
	}

	print_scan_result(&rc);

	ret = check_chunks(&rc.chunk, &rc.bg, &rc.devext, &rc.good_chunks,
			   &rc.bad_chunks, &rc.rebuild_chunks, 1);
	if (ret) {
		if (!list_empty(&rc.bg.block_groups) ||
		    !list_empty(&rc.devext.no_chunk_orphans)) {
			ret = btrfs_recover_chunks(&rc);
			if (ret)
				goto fail_rc;
		}
	} else {
		print_check_result(&rc);
		printf("Checked chunks successfully with no orphans\n");
		goto fail_rc;
	}
	validate_rebuild_chunks(&rc);
	print_check_result(&rc);

	root = open_ctree_with_broken_chunk(&rc);
	if (IS_ERR(root)) {
		fprintf(stderr, "open with broken chunk error\n");
		ret = PTR_ERR(root);
		goto fail_rc;
	}

	ret = check_all_chunks_by_metadata(&rc, root);
	if (ret) {
		fprintf(stderr, "The chunks in memory do not match the metadata of the fs. Repair failed.\n");
		goto fail_close_ctree;
	}

	ret = btrfs_rebuild_ordered_data_chunk_stripes(&rc, root);
	if (ret) {
		fprintf(stderr, "Failed to rebuild ordered chunk stripes.\n");
		goto fail_close_ctree;
	}

	if (!rc.yes) {
		ret = ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
		if (!ret) {
			ret = 1;
			goto fail_close_ctree;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	ret = remove_chunk_extent_item(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_chunk_tree(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_sys_array(&rc, root);
	BUG_ON(ret);

	ret = rebuild_block_group(trans, &rc, root);
	if (ret) {
		printf("Failed to rebuild block groups.\n");
		printf("Recommend to run 'btrfs check --init-extent-tree <dev>' after recovery\n");
	}

	btrfs_commit_transaction(trans, root);
fail_close_ctree:
	close_ctree(root);
fail_rc:
	free_recover_control(&rc);
	return ret;
}