/*
 * Copyright (C) 2013 FUJITSU LIMITED. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#define _XOPEN_SOURCE 500

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include <pthread.h>

#include "kerncompat.h"
#include "list.h"
#include "radix-tree.h"
#include "ctree.h"
#include "extent-cache.h"
#include "disk-io.h"
#include "volumes.h"
#include "transaction.h"
#include "utils.h"
#define BTRFS_NUM_MIRRORS		2

struct recover_control {
	int verbose;
	int yes;

	u16 csum_size;
	u32 sectorsize;
	u32 leafsize;
	u64 generation;
	u64 chunk_root_generation;

	struct btrfs_fs_devices *fs_devices;

	struct cache_tree chunk;
	struct block_group_tree bg;
	struct device_extent_tree devext;
	struct cache_tree eb_cache;

	struct list_head good_chunks;
	struct list_head bad_chunks;
	struct list_head unrepaired_chunks;
	pthread_mutex_t rc_lock;
};
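
/*
 * Record of a tree block found during the device scan.  The same block
 * can show up on several devices (as mirrors), so every device/offset
 * pair it was seen at is remembered, up to BTRFS_NUM_MIRRORS copies.
 */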
struct extent_record {
	struct cache_extent cache;
	u64 generation;
	u8 csum[BTRFS_CSUM_SIZE];
	struct btrfs_device *devices[BTRFS_NUM_MIRRORS];
	u64 offsets[BTRFS_NUM_MIRRORS];
	int nmirrors;
};

struct device_scan {
	struct recover_control *rc;
	struct btrfs_device *dev;
	int fd;
};
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
{
	struct extent_record *rec;

	rec = malloc(sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Fail to allocate memory for extent record.\n");
		exit(1);
	}

	memset(rec, 0, sizeof(*rec));
	rec->cache.start = btrfs_header_bytenr(eb);
	rec->cache.size = eb->len;
	rec->generation = btrfs_header_generation(eb);
	read_extent_buffer(eb, rec->csum, (unsigned long)btrfs_header_csum(eb),
			   BTRFS_CSUM_SIZE);
	return rec;
}
static int process_extent_buffer(struct cache_tree *eb_cache,
				 struct extent_buffer *eb,
				 struct btrfs_device *device, u64 offset)
{
	struct extent_record *rec;
	struct extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_extent_record(eb);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(eb_cache,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct extent_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			if (exist->cache.start != rec->cache.start ||
			    exist->cache.size != rec->cache.size ||
			    memcmp(exist->csum, rec->csum, BTRFS_CSUM_SIZE)) {
				ret = -EEXIST;
			} else {
				BUG_ON(exist->nmirrors >= BTRFS_NUM_MIRRORS);
				exist->devices[exist->nmirrors] = device;
				exist->offsets[exist->nmirrors] = offset;
				exist->nmirrors++;
			}
			goto free_out;
		}
		remove_cache_extent(eb_cache, cache);
		free(exist);
		goto again;
	}

	rec->devices[0] = device;
	rec->offsets[0] = offset;
	rec->nmirrors++;
	ret = insert_cache_extent(eb_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void free_extent_record(struct cache_extent *cache)
{
	struct extent_record *er;

	er = container_of(cache, struct extent_record, cache);
	free(er);
}

FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record);
static struct btrfs_chunk *create_chunk_item(struct chunk_record *record)
{
	struct btrfs_chunk *ret;
	struct btrfs_stripe *chunk_stripe;
	int i;

	if (!record || record->num_stripes == 0)
		return NULL;
	ret = malloc(btrfs_chunk_item_size(record->num_stripes));
	if (!ret)
		return NULL;
	btrfs_set_stack_chunk_length(ret, record->length);
	btrfs_set_stack_chunk_owner(ret, record->owner);
	btrfs_set_stack_chunk_stripe_len(ret, record->stripe_len);
	btrfs_set_stack_chunk_type(ret, record->type_flags);
	btrfs_set_stack_chunk_io_align(ret, record->io_align);
	btrfs_set_stack_chunk_io_width(ret, record->io_width);
	btrfs_set_stack_chunk_sector_size(ret, record->sector_size);
	btrfs_set_stack_chunk_num_stripes(ret, record->num_stripes);
	btrfs_set_stack_chunk_sub_stripes(ret, record->sub_stripes);
	for (i = 0, chunk_stripe = &ret->stripe; i < record->num_stripes;
	     i++, chunk_stripe++) {
		btrfs_set_stack_stripe_devid(chunk_stripe,
				record->stripes[i].devid);
		btrfs_set_stack_stripe_offset(chunk_stripe,
				record->stripes[i].offset);
		memcpy(chunk_stripe->dev_uuid, record->stripes[i].dev_uuid,
		       BTRFS_UUID_SIZE);
	}
	return ret;
}
static void init_recover_control(struct recover_control *rc, int verbose,
				 int yes)
{
	memset(rc, 0, sizeof(struct recover_control));

	cache_tree_init(&rc->chunk);
	cache_tree_init(&rc->eb_cache);
	block_group_tree_init(&rc->bg);
	device_extent_tree_init(&rc->devext);

	INIT_LIST_HEAD(&rc->good_chunks);
	INIT_LIST_HEAD(&rc->bad_chunks);
	INIT_LIST_HEAD(&rc->unrepaired_chunks);

	rc->verbose = verbose;
	rc->yes = yes;
	pthread_mutex_init(&rc->rc_lock, NULL);
}
static void free_recover_control(struct recover_control *rc)
{
	free_block_group_tree(&rc->bg);
	free_chunk_cache_tree(&rc->chunk);
	free_device_extent_tree(&rc->devext);
	free_extent_record_tree(&rc->eb_cache);
	pthread_mutex_destroy(&rc->rc_lock);
}
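
/*
 * The three process_*_item() helpers below share one pattern: build a
 * new record from the leaf, look up a cached record covering the same
 * range, and keep whichever of the two has the newer generation.
 */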
static int process_block_group_item(struct block_group_tree *bg_cache,
				    struct extent_buffer *leaf,
				    struct btrfs_key *key, int slot)
{
	struct block_group_record *rec;
	struct block_group_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_block_group_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(&bg_cache->tree,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct block_group_record, cache);

		/* check the generation and replace if needed */
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct block_group_record,
					      generation);
			/*
			 * According to the current kernel code, the following
			 * case is impossible, or there is something wrong in
			 * the kernel code.
			 */
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&bg_cache->tree, cache);
		list_del_init(&exist->list);
		free(exist);
		/*
		 * We must do the search again to avoid the following cache:
		 * /--old bg 1--//--old bg 2--/
		 *        /--new bg--/
		 */
		goto again;
	}

	ret = insert_block_group_record(bg_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_chunk_item(struct cache_tree *chunk_cache,
			      struct extent_buffer *leaf, struct btrfs_key *key,
			      int slot)
{
	struct chunk_record *rec;
	struct chunk_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_chunk_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(chunk_cache, rec->offset, rec->length);
	if (cache) {
		exist = container_of(cache, struct chunk_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int num_stripes = rec->num_stripes;
			int rec_size = btrfs_chunk_record_size(num_stripes);
			int offset = offsetof(struct chunk_record, generation);

			if (exist->num_stripes != rec->num_stripes ||
			    memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   rec_size - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(chunk_cache, cache);
		free(exist);
		goto again;
	}
	ret = insert_cache_extent(chunk_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_device_extent_item(struct device_extent_tree *devext_cache,
				      struct extent_buffer *leaf,
				      struct btrfs_key *key, int slot)
{
	struct device_extent_record *rec;
	struct device_extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_device_extent_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent2(&devext_cache->tree,
				     rec->cache.objectid,
				     rec->cache.start,
				     rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct device_extent_record, cache);
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct device_extent_record,
					      generation);
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&devext_cache->tree, cache);
		list_del_init(&exist->chunk_list);
		list_del_init(&exist->device_list);
		free(exist);
		goto again;
	}

	ret = insert_device_extent_record(devext_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void print_block_group_info(struct block_group_record *rec, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Block Group: start = %llu, len = %llu, flag = %llx\n",
	       rec->objectid, rec->offset, rec->flags);
}

static void print_block_group_tree(struct block_group_tree *tree)
{
	struct cache_extent *cache;
	struct block_group_record *rec;

	printf("All Block Groups:\n");
	for (cache = first_cache_extent(&tree->tree); cache;
	     cache = next_cache_extent(cache)) {
		rec = container_of(cache, struct block_group_record, cache);
		print_block_group_info(rec, "\t");
	}
}
static void print_stripe_info(struct stripe *data, char *prefix1, char *prefix2,
			      int index)
{
	if (prefix1)
		printf("%s", prefix1);
	if (prefix2)
		printf("%s", prefix2);
	printf("[%2d] Stripe: devid = %llu, offset = %llu\n",
	       index, data->devid, data->offset);
}

static void print_chunk_self_info(struct chunk_record *rec, char *prefix)
{
	int i;

	if (prefix)
		printf("%s", prefix);
	printf("Chunk: start = %llu, len = %llu, type = %llx, num_stripes = %u\n",
	       rec->offset, rec->length, rec->type_flags, rec->num_stripes);
	if (prefix)
		printf("%s", prefix);
	printf("    Stripes list:\n");
	for (i = 0; i < rec->num_stripes; i++)
		print_stripe_info(&rec->stripes[i], prefix, "    ", i);
}
static void print_chunk_tree(struct cache_tree *tree)
{
	struct cache_extent *n;
	struct chunk_record *entry;

	printf("All Chunks:\n");
	for (n = first_cache_extent(tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct chunk_record, cache);
		print_chunk_self_info(entry, "\t");
	}
}
static void print_device_extent_info(struct device_extent_record *rec,
				     char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device extent: devid = %llu, start = %llu, len = %llu, chunk offset = %llu\n",
	       rec->objectid, rec->offset, rec->length, rec->chunk_offset);
}

static void print_device_extent_tree(struct device_extent_tree *tree)
{
	struct cache_extent *n;
	struct device_extent_record *entry;

	printf("All Device Extents:\n");
	for (n = first_cache_extent(&tree->tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct device_extent_record, cache);
		print_device_extent_info(entry, "\t");
	}
}
static void print_device_info(struct btrfs_device *device, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device: id = %llu, name = %s\n",
	       device->devid, device->name);
}

static void print_all_devices(struct list_head *devices)
{
	struct btrfs_device *dev;

	printf("All Devices:\n");
	list_for_each_entry(dev, devices, dev_list)
		print_device_info(dev, "\t");
}
static void print_scan_result(struct recover_control *rc)
{
	if (!rc->verbose)
		return;

	printf("DEVICE SCAN RESULT:\n");
	printf("Filesystem Information:\n");
	printf("\tsectorsize: %d\n", rc->sectorsize);
	printf("\tleafsize: %d\n", rc->leafsize);
	printf("\ttree root generation: %llu\n", rc->generation);
	printf("\tchunk root generation: %llu\n", rc->chunk_root_generation);

	print_all_devices(&rc->fs_devices->devices);
	print_block_group_tree(&rc->bg);
	print_chunk_tree(&rc->chunk);
	print_device_extent_tree(&rc->devext);
}
static void print_chunk_info(struct chunk_record *chunk, char *prefix)
{
	struct device_extent_record *devext;
	int i = 0;

	print_chunk_self_info(chunk, prefix);
	if (prefix)
		printf("%s", prefix);
	if (chunk->bg_rec)
		print_block_group_info(chunk->bg_rec, "    ");
	else
		printf("    No block group.\n");
	if (prefix)
		printf("%s", prefix);
	if (list_empty(&chunk->dextents)) {
		printf("    No device extent.\n");
	} else {
		printf("    Device extent list:\n");
		list_for_each_entry(devext, &chunk->dextents, chunk_list) {
			if (prefix)
				printf("%s", prefix);
			printf("%s[%2d]", "        ", i);
			print_device_extent_info(devext, NULL);
			i++;
		}
	}
}
static void print_check_result(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct device_extent_record *devext;
	int total = 0;
	int good = 0;
	int bad = 0;

	if (!rc->verbose)
		return;

	printf("CHECK RESULT:\n");
	printf("Healthy Chunks:\n");
	list_for_each_entry(chunk, &rc->good_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	printf("Bad Chunks:\n");
	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		print_chunk_info(chunk, "  ");
		bad++;
		total++;
	}
	printf("\n");
	printf("Total Chunks:\t%d\n", total);
	printf("  Healthy:\t%d\n", good);
	printf("  Bad:\t%d\n", bad);

	printf("\n");
	printf("Orphan Block Groups:\n");
	list_for_each_entry(bg, &rc->bg.block_groups, list)
		print_block_group_info(bg, "  ");

	printf("\n");
	printf("Orphan Device Extents:\n");
	list_for_each_entry(devext, &rc->devext.no_chunk_orphans, chunk_list)
		print_device_extent_info(devext, "  ");
}
static int check_chunk_by_metadata(struct recover_control *rc,
				   struct btrfs_root *root,
				   struct chunk_record *chunk, int bg_only)
{
	int ret;
	int i;
	int slot;
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *dev_root;
	struct stripe *stripe;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_block_group_item *bg_ptr;
	struct extent_buffer *l;

	btrfs_init_path(&path);

	if (bg_only)
		goto bg_check;

	dev_root = root->fs_info->dev_root;
	for (i = 0; i < chunk->num_stripes; i++) {
		stripe = &chunk->stripes[i];

		key.objectid = stripe->devid;
		key.offset = stripe->offset;
		key.type = BTRFS_DEV_EXTENT_KEY;

		ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
		if (ret < 0) {
			fprintf(stderr, "Search device extent failed(%d)\n",
				ret);
			btrfs_release_path(&path);
			return ret;
		} else if (ret > 0) {
			if (rc->verbose)
				fprintf(stderr,
					"No device extent[%llu, %llu]\n",
					stripe->devid, stripe->offset);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		l = path.nodes[0];
		slot = path.slots[0];
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		if (chunk->offset !=
		    btrfs_dev_extent_chunk_offset(l, dev_extent)) {
			if (rc->verbose)
				fprintf(stderr,
					"Device tree mismatch with chunk dev_extent[%llu, %llu], chunk[%llu, %llu]\n",
					btrfs_dev_extent_chunk_offset(l,
								dev_extent),
					btrfs_dev_extent_length(l, dev_extent),
					chunk->offset, chunk->length);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		btrfs_release_path(&path);
	}

bg_check:
	key.objectid = chunk->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk->length;

	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
				0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search block group failed(%d)\n", ret);
		btrfs_release_path(&path);
		return ret;
	} else if (ret > 0) {
		if (rc->verbose)
			fprintf(stderr, "No block group[%llu, %llu]\n",
				key.objectid, key.offset);
		btrfs_release_path(&path);
		return -ENOENT;
	}

	l = path.nodes[0];
	slot = path.slots[0];
	bg_ptr = btrfs_item_ptr(l, slot, struct btrfs_block_group_item);
	if (chunk->type_flags != btrfs_disk_block_group_flags(l, bg_ptr)) {
		if (rc->verbose)
			fprintf(stderr,
				"Chunk[%llu, %llu]'s type(%llu) is different from Block Group's type(%llu)\n",
				chunk->offset, chunk->length, chunk->type_flags,
				btrfs_disk_block_group_flags(l, bg_ptr));
		btrfs_release_path(&path);
		return -ENOENT;
	}
	btrfs_release_path(&path);
	return 0;
}
static int check_all_chunks_by_metadata(struct recover_control *rc,
					struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	LIST_HEAD(orphan_chunks);
	int ret = 0;
	int err;

	list_for_each_entry_safe(chunk, next, &rc->good_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 0);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err != -ENOENT && !ret)
			ret = err ? err : -EINVAL;
	}
	list_splice(&orphan_chunks, &rc->bad_chunks);
	return ret;
}
static int extract_metadata_record(struct recover_control *rc,
				   struct extent_buffer *leaf)
{
	struct btrfs_key key;
	int ret = 0;
	int i;
	u32 nritems;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_block_group_item(&rc->bg, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_CHUNK_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_chunk_item(&rc->chunk, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_DEV_EXTENT_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_device_extent_item(&rc->devext, leaf,
							 &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		}
		if (ret)
			break;
	}
	return ret;
}
static inline int is_super_block_address(u64 offset)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		if (offset == btrfs_sb_offset(i))
			return 1;
	}
	return 0;
}
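
/*
 * Thread worker: read the device sector by sector and treat everything
 * that carries our fsid and a valid csum as a tree block.  Leaves of the
 * extent, device and chunk trees are parsed for block group items, dev
 * extent items and chunk items respectively.
 */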
static int scan_one_device(void *dev_scan_struct)
{
	struct extent_buffer *buf;
	u64 bytenr;
	int ret = 0;
	struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
	struct recover_control *rc = dev_scan->rc;
	struct btrfs_device *device = dev_scan->dev;
	int fd = dev_scan->fd;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	if (ret)
		return 1;

	buf = malloc(sizeof(*buf) + rc->leafsize);
	if (!buf)
		return -ENOMEM;
	buf->len = rc->leafsize;

	bytenr = 0;
	while (1) {
		if (is_super_block_address(bytenr))
			bytenr += rc->sectorsize;

		if (pread64(fd, buf->data, rc->leafsize, bytenr) <
		    rc->leafsize)
			break;

		if (memcmp_extent_buffer(buf, rc->fs_devices->fsid,
					 btrfs_header_fsid(),
					 BTRFS_FSID_SIZE)) {
			bytenr += rc->sectorsize;
			continue;
		}

		if (verify_tree_block_csum_silent(buf, rc->csum_size)) {
			bytenr += rc->sectorsize;
			continue;
		}

		pthread_mutex_lock(&rc->rc_lock);
		ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
		pthread_mutex_unlock(&rc->rc_lock);
		if (ret)
			goto out;

		if (btrfs_header_level(buf) != 0)
			goto next_node;

		switch (btrfs_header_owner(buf)) {
		case BTRFS_EXTENT_TREE_OBJECTID:
		case BTRFS_DEV_TREE_OBJECTID:
			/* different tree use different generation */
			if (btrfs_header_generation(buf) > rc->generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		case BTRFS_CHUNK_TREE_OBJECTID:
			if (btrfs_header_generation(buf) >
			    rc->chunk_root_generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		}
next_node:
		bytenr += rc->leafsize;
	}
out:
	close(fd);
	free(buf);
	return ret;
}
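
/* Scan all devices concurrently, one worker thread per device. */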
static int scan_devices(struct recover_control *rc)
{
	int ret = 0;
	int fd;
	int i;
	struct btrfs_device *dev;
	struct device_scan *dev_scans;
	pthread_t *t_scans;
	int *t_rets;
	int devnr = 0;
	int devidx = 0;
	int cancel_from = 0;
	int cancel_to = 0;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
		devnr++;
	dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
						 * devnr);
	if (!dev_scans)
		return -ENOMEM;
	t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
	if (!t_scans)
		return -ENOMEM;
	t_rets = (int *)malloc(sizeof(int) * devnr);
	if (!t_rets)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		fd = open(dev->name, O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "Failed to open device %s\n",
				dev->name);
			ret = 1;
			goto out2;
		}
		dev_scans[devidx].rc = rc;
		dev_scans[devidx].dev = dev;
		dev_scans[devidx].fd = fd;
		ret = pthread_create(&t_scans[devidx], NULL,
				     (void *)scan_one_device,
				     (void *)&dev_scans[devidx]);
		if (ret) {
			cancel_from = 0;
			cancel_to = devidx - 1;
			goto out1;
		}
		devidx++;
	}

	i = 0;
	while (i < devidx) {
		ret = pthread_join(t_scans[i], (void **)&t_rets[i]);
		if (ret || t_rets[i]) {
			ret = 1;
			cancel_from = i + 1;
			cancel_to = devnr - 1;
			goto out1;
		}
		i++;
	}
	goto out2;
out1:
	while (cancel_from <= cancel_to) {
		pthread_cancel(t_scans[cancel_from]);
		cancel_from++;
	}
out2:
	free(dev_scans);
	free(t_scans);
	free(t_rets);
	return !!ret;
}
static int build_device_map_by_chunk_record(struct btrfs_root *root,
					    struct chunk_record *chunk)
{
	int ret = 0;
	int i;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u16 num_stripes;
	struct btrfs_mapping_tree *map_tree;
	struct map_lookup *map;
	struct stripe *stripe;

	map_tree = &root->fs_info->mapping_tree;
	num_stripes = chunk->num_stripes;
	map = malloc(btrfs_map_lookup_size(num_stripes));
	if (!map)
		return -ENOMEM;
	map->ce.start = chunk->offset;
	map->ce.size = chunk->length;
	map->num_stripes = num_stripes;
	map->io_width = chunk->io_width;
	map->io_align = chunk->io_align;
	map->sector_size = chunk->sector_size;
	map->stripe_len = chunk->stripe_len;
	map->type = chunk->type_flags;
	map->sub_stripes = chunk->sub_stripes;

	for (i = 0, stripe = chunk->stripes; i < num_stripes; i++, stripe++) {
		devid = stripe->devid;
		memcpy(uuid, stripe->dev_uuid, BTRFS_UUID_SIZE);
		map->stripes[i].physical = stripe->offset;
		map->stripes[i].dev = btrfs_find_device(root, devid,
							uuid, NULL);
		if (!map->stripes[i].dev) {
			free(map);
			return -EIO;
		}
	}

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	return ret;
}
static int build_device_maps_by_chunk_records(struct recover_control *rc,
					      struct btrfs_root *root)
{
	int ret = 0;
	struct chunk_record *chunk;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	return ret;
}
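
/*
 * Remove all extent items that live inside the given block group from
 * the extent tree.  The block group item itself is kept.
 */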
static int block_group_remove_all_extent_items(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct block_group_record *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	u64 start = bg->objectid;
	u64 end = bg->objectid + bg->offset;
	u64 old_val;
	int nitems;
	int del_s;
	int del_nr;
	int i;
	int ret;

	btrfs_init_path(&path);
	root = root->fs_info->extent_root;

	key.objectid = start;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
	if (ret < 0)
		goto err;
	else if (ret > 0)
		ret = 0;

	leaf = path.nodes[0];
	nitems = btrfs_header_nritems(leaf);
	if (!nitems) {
		/* The tree is empty. */
		ret = 0;
		goto err;
	}

	if (path.slots[0] >= nitems) {
		ret = btrfs_next_leaf(root, &path);
		if (ret < 0)
			goto err;
		else if (ret > 0) {
			ret = 0;
			goto err;
		}
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, 0);
		if (key.objectid >= end)
			goto err;
		btrfs_release_path(&path);
		goto again;
	}

	del_nr = 0;
	del_s = -1;
	for (i = path.slots[0]; i < nitems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid >= end)
			break;

		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (del_nr == 0)
				continue;
			else
				break;
		}

		if (del_s == -1)
			del_s = i;
		del_nr++;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			old_val = btrfs_super_bytes_used(fs_info->super_copy);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				old_val += root->leafsize;
			else
				old_val += key.offset;
			btrfs_set_super_bytes_used(fs_info->super_copy,
						   old_val);
		}
	}

	if (del_nr) {
		ret = btrfs_del_items(trans, root, &path, del_s, del_nr);
		if (ret)
			goto err;
	}

	if (key.objectid < end) {
		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			key.objectid += root->sectorsize;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
		}
		btrfs_release_path(&path);
		goto again;
	}
err:
	btrfs_release_path(&path);
	return ret;
}
static int block_group_free_all_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct block_group_record *bg)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info;
	u64 start;
	u64 end;

	info = root->fs_info;
	cache = btrfs_lookup_block_group(info, bg->objectid);
	if (!cache)
		return -ENOENT;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	set_extent_bits(&info->block_group_cache, start, end,
			BLOCK_GROUP_DIRTY, GFP_NOFS);
	set_extent_dirty(&info->free_space_cache, start, end, GFP_NOFS);

	btrfs_set_block_group_used(&cache->item, 0);

	return 0;
}
static int remove_chunk_extent_item(struct btrfs_trans_handle *trans,
				    struct recover_control *rc,
				    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	int ret = 0;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		if (!(chunk->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;
		ret = block_group_remove_all_extent_items(trans, root,
							   chunk->bg_rec);
		if (ret)
			return ret;

		ret = block_group_free_all_extent(trans, root, chunk->bg_rec);
		if (ret)
			return ret;
	}
	return ret;
}
static int __rebuild_chunk_root(struct btrfs_trans_handle *trans,
				struct recover_control *rc,
				struct btrfs_root *root)
{
	u64 min_devid = -1;
	struct btrfs_device *dev;
	struct extent_buffer *cow;
	struct btrfs_disk_key disk_key;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		if (min_devid > dev->devid)
			min_devid = dev->devid;
	}
	disk_key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	disk_key.type = BTRFS_DEV_ITEM_KEY;
	disk_key.offset = min_devid;

	cow = btrfs_alloc_free_block(trans, root, root->nodesize,
				     BTRFS_CHUNK_TREE_OBJECTID,
				     &disk_key, 0, 0, 0);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_nritems(cow, 0);
	btrfs_set_header_level(cow, 0);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(cow, BTRFS_CHUNK_TREE_OBJECTID);

	write_extent_buffer(cow, root->fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
			btrfs_header_chunk_tree_uuid(cow),
			BTRFS_UUID_SIZE);

	root->node = cow;
	btrfs_mark_buffer_dirty(cow);

	return 0;
}
static int __rebuild_device_items(struct btrfs_trans_handle *trans,
				  struct recover_control *rc,
				  struct btrfs_root *root)
{
	struct btrfs_device *dev;
	struct btrfs_key key;
	struct btrfs_dev_item *dev_item;
	int ret = 0;

	dev_item = malloc(sizeof(struct btrfs_dev_item));
	if (!dev_item)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
		key.type = BTRFS_DEV_ITEM_KEY;
		key.offset = dev->devid;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		ret = btrfs_insert_item(trans, root, &key,
					dev_item, sizeof(*dev_item));
		if (ret)
			break;
	}

	free(dev_item);
	return ret;
}
static int __rebuild_chunk_items(struct btrfs_trans_handle *trans,
				 struct recover_control *rc,
				 struct btrfs_root *root)
{
	struct btrfs_key key;
	struct btrfs_chunk *chunk = NULL;
	struct btrfs_root *chunk_root;
	struct chunk_record *chunk_rec;
	int ret;

	chunk_root = root->fs_info->chunk_root;

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		chunk = create_chunk_item(chunk_rec);
		if (!chunk)
			return -ENOMEM;

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(chunk->num_stripes));
		free(chunk);
		if (ret)
			return ret;
	}
	return 0;
}
static int rebuild_chunk_tree(struct btrfs_trans_handle *trans,
			      struct recover_control *rc,
			      struct btrfs_root *root)
{
	int ret = 0;

	root = root->fs_info->chunk_root;

	ret = __rebuild_chunk_root(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_device_items(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_chunk_items(trans, rc, root);

	return ret;
}
static int rebuild_sys_array(struct recover_control *rc,
			     struct btrfs_root *root)
{
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct chunk_record *chunk_rec;
	int ret = 0;
	u16 num_stripes;

	btrfs_set_super_sys_array_size(root->fs_info->super_copy, 0);

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		if (!(chunk_rec->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		num_stripes = chunk_rec->num_stripes;
		chunk = create_chunk_item(chunk_rec);
		if (!chunk) {
			ret = -ENOMEM;
			break;
		}

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_add_system_chunk(NULL, root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
		free(chunk);
		if (ret)
			break;
	}
	return ret;
}
static struct btrfs_root *
open_ctree_with_broken_chunk(struct recover_control *rc)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_super_block *disk_super;
	struct extent_buffer *eb;
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 stripesize;
	int ret;

	fs_info = btrfs_new_fs_info(1, BTRFS_SUPER_INFO_OFFSET);
	if (!fs_info) {
		fprintf(stderr, "Failed to allocate memory for fs_info\n");
		return ERR_PTR(-ENOMEM);
	}
	fs_info->is_chunk_recover = 1;

	fs_info->fs_devices = rc->fs_devices;
	ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
	if (ret)
		goto out;

	disk_super = fs_info->super_copy;
	ret = btrfs_read_dev_super(fs_info->fs_devices->latest_bdev,
				   disk_super, fs_info->super_bytenr);
	if (ret) {
		fprintf(stderr, "No valid btrfs found\n");
		goto out_devices;
	}

	memcpy(fs_info->fsid, &disk_super->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_fs_compatibility(disk_super, 1);
	if (ret)
		goto out_devices;

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     fs_info->chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	ret = build_device_maps_by_chunk_records(rc, fs_info->chunk_root);
	if (ret)
		goto out_cleanup;

	ret = btrfs_setup_all_roots(fs_info, 0, 0);
	if (ret)
		goto out_failed;

	eb = fs_info->tree_root->node;
	read_extent_buffer(eb, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(eb),
			   BTRFS_UUID_SIZE);

	return fs_info->fs_root;
out_failed:
	btrfs_release_all_roots(fs_info);
out_cleanup:
	btrfs_cleanup_all_caches(fs_info);
out_devices:
	btrfs_close_devices(fs_info->fs_devices);
out:
	btrfs_free_fs_info(fs_info);
	return ERR_PTR(ret);
}
static int recover_prepare(struct recover_control *rc, char *path)
{
	int ret;
	int fd;
	struct btrfs_super_block *sb;
	struct btrfs_fs_devices *fs_devices;

	ret = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s error\n", path);
		return -1;
	}

	sb = malloc(sizeof(struct btrfs_super_block));
	if (!sb) {
		fprintf(stderr, "allocating memory for sb failed.\n");
		ret = -ENOMEM;
		goto fail_close_fd;
	}

	ret = btrfs_read_dev_super(fd, sb, BTRFS_SUPER_INFO_OFFSET);
	if (ret) {
		fprintf(stderr, "read super block error\n");
		goto fail_free_sb;
	}

	rc->sectorsize = btrfs_super_sectorsize(sb);
	rc->leafsize = btrfs_super_leafsize(sb);
	rc->generation = btrfs_super_generation(sb);
	rc->chunk_root_generation = btrfs_super_chunk_root_generation(sb);
	rc->csum_size = btrfs_super_csum_size(sb);

	/* if seed, the result of scanning below will be partial */
	if (btrfs_super_flags(sb) & BTRFS_SUPER_FLAG_SEEDING) {
		fprintf(stderr, "this device is seed device\n");
		ret = -1;
		goto fail_free_sb;
	}

	ret = btrfs_scan_fs_devices(fd, path, &fs_devices, 0, 1);
	if (ret)
		goto fail_free_sb;

	rc->fs_devices = fs_devices;

	if (rc->verbose)
		print_all_devices(&rc->fs_devices->devices);

fail_free_sb:
	free(sb);
fail_close_fd:
	close(fd);
	return ret;
}
static int btrfs_get_device_extents(u64 chunk_object,
				    struct list_head *orphan_devexts,
				    struct list_head *ret_list)
{
	struct device_extent_record *devext;
	struct device_extent_record *next;
	int count = 0;

	list_for_each_entry_safe(devext, next, orphan_devexts, chunk_list) {
		if (devext->chunk_offset == chunk_object) {
			list_move_tail(&devext->chunk_list, ret_list);
			count++;
		}
	}
	return count;
}
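
/*
 * Expected stripe count by profile: 0 (varies with the device number)
 * for RAID0/10/5/6, 2 for RAID1/DUP, 1 otherwise.
 */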
static int calc_num_stripes(u64 type)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		return 0;	/* Varies, depends on the device number. */
	else if (type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 2;
	else
		return 1;
}

static inline int calc_sub_nstripes(u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID10)
		return 2;
	else
		return 1;
}
static int btrfs_verify_device_extents(struct block_group_record *bg,
				       struct list_head *devexts, int ndevexts)
{
	struct device_extent_record *devext;
	u64 stripe_length;
	int expected_num_stripes;

	expected_num_stripes = calc_num_stripes(bg->flags);
	if (expected_num_stripes && expected_num_stripes != ndevexts)
		return 1;

	stripe_length = calc_stripe_length(bg->flags, bg->offset, ndevexts);
	list_for_each_entry(devext, devexts, chunk_list) {
		if (devext->length != stripe_length)
			return 1;
	}
	return 0;
}
static int btrfs_rebuild_unordered_chunk_stripes(struct recover_control *rc,
						 struct chunk_record *chunk)
{
	struct device_extent_record *devext;
	struct btrfs_device *device;
	int i;

	devext = list_first_entry(&chunk->dextents, struct device_extent_record,
				  chunk_list);
	for (i = 0; i < chunk->num_stripes; i++) {
		chunk->stripes[i].devid = devext->objectid;
		chunk->stripes[i].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device)
			return -ENOENT;
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[i].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
		devext = list_next_entry(devext, chunk_list);
	}
	return 0;
}
static int btrfs_calc_stripe_index(struct chunk_record *chunk, u64 logical)
{
	u64 offset = logical - chunk->offset;
	int stripe_nr;
	int nr_data_stripes;
	int index;

	stripe_nr = offset / chunk->stripe_len;
	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID0) {
		index = stripe_nr % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID10) {
		index = stripe_nr % (chunk->num_stripes / chunk->sub_stripes);
		index *= chunk->sub_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5) {
		nr_data_stripes = chunk->num_stripes - 1;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6) {
		nr_data_stripes = chunk->num_stripes - 2;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else {
		return -1;
	}
	return index;
}
/* calc the logical offset which is the start of the next stripe. */
static inline u64 btrfs_next_stripe_logical_offset(struct chunk_record *chunk,
						   u64 logical)
{
	u64 offset = logical - chunk->offset;

	offset /= chunk->stripe_len;
	offset *= chunk->stripe_len;
	offset += chunk->stripe_len;

	return offset + chunk->offset;
}
static int is_extent_record_in_device_extent(struct extent_record *er,
					     struct device_extent_record *dext,
					     int *mirror)
{
	int i;

	for (i = 0; i < er->nmirrors; i++) {
		if (er->devices[i]->devid == dext->objectid &&
		    er->offsets[i] >= dext->offset &&
		    er->offsets[i] < dext->offset + dext->length) {
			*mirror = i;
			return 1;
		}
	}
	return 0;
}
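
/*
 * For striped metadata chunks the stripe order matters.  Walk the tree
 * blocks we scanned in this chunk: each block pins the device extent it
 * was found on to the stripe index its logical address maps to.  Stripes
 * that hold no tree block at all can then be filled in freely.
 */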
static int
btrfs_rebuild_ordered_meta_chunk_stripes(struct recover_control *rc,
					 struct chunk_record *chunk)
{
	u64 start = chunk->offset;
	u64 end = chunk->offset + chunk->length;
	struct cache_extent *cache;
	struct extent_record *er;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *device;
	LIST_HEAD(devexts);
	int index;
	int mirror;
	int ret;

	cache = lookup_cache_extent(&rc->eb_cache,
				    start, chunk->length);
	if (!cache) {
		/* No used space, we can reorder the stripes freely. */
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);
		return ret;
	}

	list_splice_init(&chunk->dextents, &devexts);
again:
	er = container_of(cache, struct extent_record, cache);
	index = btrfs_calc_stripe_index(chunk, er->cache.start);
	if (chunk->stripes[index].devid)
		goto next;
	list_for_each_entry_safe(devext, next, &devexts, chunk_list) {
		if (is_extent_record_in_device_extent(er, devext, &mirror)) {
			chunk->stripes[index].devid = devext->objectid;
			chunk->stripes[index].offset = devext->offset;
			memcpy(chunk->stripes[index].dev_uuid,
			       er->devices[mirror]->uuid,
			       BTRFS_UUID_SIZE);
			list_move(&devext->chunk_list, &chunk->dextents);
			break;
		}
	}
next:
	start = btrfs_next_stripe_logical_offset(chunk, er->cache.start);
	if (start >= end)
		goto no_extent_record;

	cache = lookup_cache_extent(&rc->eb_cache, start, end - start);
	if (cache)
		goto again;
no_extent_record:
	if (list_empty(&devexts))
		return 0;

	if (chunk->type_flags & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
		/* Fixme: try to recover the order by the parity block. */
		list_splice_tail(&devexts, &chunk->dextents);
		return -EINVAL;
	}

	/* There is no data on the lost stripes, we can reorder them freely. */
	for (index = 0; index < chunk->num_stripes; index++) {
		if (chunk->stripes[index].devid)
			continue;

		devext = list_first_entry(&devexts,
					  struct device_extent_record,
					  chunk_list);
		list_move(&devext->chunk_list, &chunk->dextents);

		chunk->stripes[index].devid = devext->objectid;
		chunk->stripes[index].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device) {
			list_splice_tail(&devexts, &chunk->dextents);
			return -EINVAL;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[index].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
	}
	return 0;
}
#define BTRFS_ORDERED_RAID	(BTRFS_BLOCK_GROUP_RAID0 |	\
				 BTRFS_BLOCK_GROUP_RAID10 |	\
				 BTRFS_BLOCK_GROUP_RAID5 |	\
				 BTRFS_BLOCK_GROUP_RAID6)
static int btrfs_rebuild_chunk_stripes(struct recover_control *rc,
				       struct chunk_record *chunk)
{
	int ret;

	/*
	 * All the data in the system metadata chunk will be dropped,
	 * so we need not guarantee that the data is right or not, that
	 * is we can reorder the stripes in the system metadata chunk.
	 */
	if ((chunk->type_flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = btrfs_rebuild_ordered_meta_chunk_stripes(rc, chunk);
	else if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA) &&
		 (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = 1;	/* Be handled after the fs is opened. */
	else
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);

	return ret;
}
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
					       + (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			   (unsigned long)csum_item, csum_size);
	return ret;
}
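
/*
 * Translate a logical data offset (derived from the csum key plus
 * csum_offset blocks) into a physical offset on one device.  E.g. RAID5
 * over 3 devices (nr_data_stripes = 2), stripe_len = 64K, chunk-relative
 * offset 5 * 64K + 4K: logical_stripe_nr = 5, dev_stripe_nr = 2, so the
 * result is dev_offset + 2 * 64K + 4K.
 */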
static u64 calc_data_offset(struct btrfs_key *key,
			    struct chunk_record *chunk,
			    u64 dev_offset,
			    u64 csum_offset,
			    u32 blocksize)
{
	u64 data_offset;
	int logical_stripe_nr;
	int dev_stripe_nr;
	int nr_data_stripes;

	data_offset = key->offset + csum_offset * blocksize - chunk->offset;
	nr_data_stripes = chunk->num_stripes;

	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
		nr_data_stripes -= 1;
	else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
		nr_data_stripes -= 2;

	logical_stripe_nr = data_offset / chunk->stripe_len;
	dev_stripe_nr = logical_stripe_nr / nr_data_stripes;

	data_offset -= logical_stripe_nr * chunk->stripe_len;
	data_offset += dev_stripe_nr * chunk->stripe_len;

	return dev_offset + data_offset;
}
static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	char *data;
	int ret = 0;
	u32 csum_result = ~(u32)0;

	data = malloc(len);
	if (!data)
		return -1;
	ret = pread64(fd, data, len, start);
	if (ret < 0 || ret != len) {
		ret = -1;
		goto out;
	}
	ret = 0;
	csum_result = btrfs_csum_data(NULL, data, csum_result, len);
	btrfs_csum_final(csum_result, (char *)&csum_result);
	if (csum_result != tree_csum)
		ret = 1;
out:
	free(data);
	return ret;
}
static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
			   struct extent_buffer *leaf, int slot)
{
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 offset = btrfs_item_size_nr(leaf, slot);

	offset /= csum_size;
	offset *= blocksize;
	offset += key->offset;

	return offset;
}
static int insert_stripe(struct list_head *devexts,
			 struct recover_control *rc,
			 struct chunk_record *chunk,
			 int index)
{
	struct device_extent_record *devext;
	struct btrfs_device *dev;

	devext = list_entry(devexts->next, struct device_extent_record,
			    chunk_list);
	dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					 0);
	if (!dev)
		return 1;
	BUG_ON(btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					  1));

	chunk->stripes[index].devid = devext->objectid;
	chunk->stripes[index].offset = devext->offset;
	memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);

	list_move(&devext->chunk_list, &chunk->dextents);

	return 0;
}
#define EQUAL_STRIPE (1 << 0)

static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;
	u64 start = chunk->offset;
	u64 end = start + chunk->stripe_len;
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;
	u64 data_offset;
	u32 blocksize = root->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);
	LIST_HEAD(candidates);

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
								> start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto next_stripe;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			if (key.offset >= end)
				goto next_stripe;
			else
				goto next_csum;
		}

		leaf = path.nodes[0];
		slot = path.slots[0];

		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		} else {
			if (key.offset >= end)
				goto next_stripe;
		}

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto out;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		list_for_each_entry(devext, &unordered, chunk_list)
			num_unordered++;
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		list_for_each_entry(devext, &unordered, chunk_list)
			num_unordered++;
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
		    && num_unordered == 2) {
			list_splice_init(&unordered, &chunk->dextents);
			btrfs_release_path(&path);
			return 0;
		}
		goto next_stripe;
	}

	if (list_is_last(candidates.next, &candidates)) {
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	list_splice_init(&candidates, &unordered);
	list_for_each_entry(devext, &unordered, chunk_list)
		num_unordered++;
	if (num_unordered == 1) {
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		if ((num_unordered == 2 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID5)
		 || (num_unordered == 3 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID6)) {
			for (i = 0; i < chunk->num_stripes; i++) {
				if (!chunk->stripes[i].devid) {
					ret = insert_stripe(&unordered, rc,
							    chunk, i);
					if (ret)
						goto fail_out;
				}
			}
		}
	}
fail_out:
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
						    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	int ret = 0;
	int err;
	u8 flags;

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
		 && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
			flags = 0;
			err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
							      &flags);
			if (err) {
				list_move(&chunk->list, &rc->bad_chunks);
				if (flags & EQUAL_STRIPE)
					fprintf(stderr,
			"Failure: too many equal stripes in chunk[%llu %llu]\n",
						chunk->offset, chunk->length);
				if (!ret)
					ret = err;
			} else
				list_move(&chunk->list, &rc->good_chunks);
		}
	}
	return ret;
}
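
/*
 * Rebuild one chunk record for every orphan block group, using the
 * orphan device extents that point back at it as the stripes.  Chunks
 * whose stripes cannot be verified or ordered end up on the bad or
 * unrepaired list.
 */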
static int btrfs_recover_chunks(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct block_group_record *next;
	LIST_HEAD(new_chunks);
	LIST_HEAD(devexts);
	int nstripes;
	int ret = 0;

	/* create the chunk by block group */
	list_for_each_entry_safe(bg, next, &rc->bg.block_groups, list) {
		nstripes = btrfs_get_device_extents(bg->objectid,
						    &rc->devext.no_chunk_orphans,
						    &devexts);
		chunk = malloc(btrfs_chunk_record_size(nstripes));
		if (!chunk)
			return -ENOMEM;
		memset(chunk, 0, btrfs_chunk_record_size(nstripes));
		INIT_LIST_HEAD(&chunk->dextents);
		chunk->bg_rec = bg;
		chunk->cache.start = bg->objectid;
		chunk->cache.size = bg->offset;
		chunk->objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		chunk->type = BTRFS_CHUNK_ITEM_KEY;
		chunk->offset = bg->objectid;
		chunk->generation = bg->generation;
		chunk->length = bg->offset;
		chunk->owner = BTRFS_CHUNK_TREE_OBJECTID;
		chunk->stripe_len = BTRFS_STRIPE_LEN;
		chunk->type_flags = bg->flags;
		chunk->io_width = BTRFS_STRIPE_LEN;
		chunk->io_align = BTRFS_STRIPE_LEN;
		chunk->sector_size = rc->sectorsize;
		chunk->sub_stripes = calc_sub_nstripes(bg->flags);

		ret = insert_cache_extent(&rc->chunk, &chunk->cache);
		BUG_ON(ret);

		if (!nstripes) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		list_splice_init(&devexts, &chunk->dextents);

		ret = btrfs_verify_device_extents(bg, &devexts, nstripes);
		if (ret) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		chunk->num_stripes = nstripes;
		ret = btrfs_rebuild_chunk_stripes(rc, chunk);
		if (ret > 0)
			list_add_tail(&chunk->list, &rc->unrepaired_chunks);
		else if (ret < 0)
			list_add_tail(&chunk->list, &rc->bad_chunks);
		else
			list_add_tail(&chunk->list, &rc->good_chunks);
	}
	/*
	 * Don't worry about the lost orphan device extents, they don't
	 * have their chunk and block group, they must be the old ones
	 * that we have dropped.
	 */
	return 0;
}
/*
 * Return 0 when successful, < 0 on error and > 0 if aborted by user
 */
int btrfs_recover_chunk_tree(char *path, int verbose, int yes)
{
	int ret = 0;
	struct btrfs_root *root = NULL;
	struct btrfs_trans_handle *trans;
	struct recover_control rc;

	init_recover_control(&rc, verbose, yes);

	ret = recover_prepare(&rc, path);
	if (ret) {
		fprintf(stderr, "recover prepare error\n");
		return ret;
	}

	ret = scan_devices(&rc);
	if (ret) {
		fprintf(stderr, "scan chunk headers error\n");
		goto fail_rc;
	}

	if (cache_tree_empty(&rc.chunk) &&
	    cache_tree_empty(&rc.bg.tree) &&
	    cache_tree_empty(&rc.devext.tree)) {
		fprintf(stderr, "no recoverable chunk\n");
		goto fail_rc;
	}

	print_scan_result(&rc);

	ret = check_chunks(&rc.chunk, &rc.bg, &rc.devext, &rc.good_chunks,
			   &rc.bad_chunks, 1);
	print_check_result(&rc);
	if (ret) {
		if (!list_empty(&rc.bg.block_groups) ||
		    !list_empty(&rc.devext.no_chunk_orphans)) {
			ret = btrfs_recover_chunks(&rc);
			if (ret)
				goto fail_rc;
		}
		/*
		 * If the chunk is healthy, its block group item and device
		 * extent item should be written on the disks. So, it is very
		 * likely that the bad chunk is an old one that has been
		 * dropped from the fs. Don't deal with them now, we will
		 * check it after the fs is opened.
		 */
	} else {
		fprintf(stderr, "Checked chunks successfully with no orphans\n");
		goto fail_rc;
	}

	root = open_ctree_with_broken_chunk(&rc);
	if (IS_ERR(root)) {
		fprintf(stderr, "open with broken chunk error\n");
		ret = PTR_ERR(root);
		goto fail_rc;
	}

	ret = check_all_chunks_by_metadata(&rc, root);
	if (ret) {
		fprintf(stderr, "The chunks in memory do not match the metadata of the fs. Repair failed.\n");
		goto fail_close_ctree;
	}

	ret = btrfs_rebuild_ordered_data_chunk_stripes(&rc, root);
	if (ret) {
		fprintf(stderr, "Failed to rebuild ordered chunk stripes.\n");
		goto fail_close_ctree;
	}

	if (!rc.yes) {
		ret = ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
		if (!ret) {
			ret = 1;
			goto fail_close_ctree;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	ret = remove_chunk_extent_item(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_chunk_tree(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_sys_array(&rc, root);
	BUG_ON(ret);

	btrfs_commit_transaction(trans, root);
fail_close_ctree:
	close_ctree(root);
fail_rc:
	free_recover_control(&rc);
	return ret;
}