/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extent and super block and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_dev;

#define SCRUB_PAGES_PER_BIO		16	/* 64k per bio */
#define SCRUB_BIOS_PER_DEV		16	/* 1 MB per device in flight */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

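/*
 * one scrub_page per PAGE_SIZE piece of an extent: it records where the
 * data lives (device, physical and logical position, mirror number), the
 * expected checksum and the per-page I/O result
 */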
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

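/*
 * a scrub_bio collects up to SCRUB_PAGES_PER_BIO physically and logically
 * contiguous pages for a single bio submission
 */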
struct scrub_bio {
	int			index;
	struct scrub_dev	*sdev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

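/*
 * a scrub_block groups the pages of one verification unit (a data sector
 * or a tree block); the error flags summarize the result of the recheck
 */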
struct scrub_block {
	struct scrub_page	pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_dev	*sdev;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

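/* per-device scrub context: bio pool, queued checksums and statistics */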
struct scrub_dev {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
	struct btrfs_device	*dev;
	int			first_free;
	int			curr;
	atomic_t		in_flight;
	atomic_t		fixup_cnt;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_bio;	/* <= SCRUB_PAGES_PER_BIO */
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_dev	*sdev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};

static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblock);
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
			       struct scrub_block *sblock, int is_metadata,
			       int have_csum, u8 *csum, u64 generation,
			       u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
				 struct scrub_page *spage);
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
		       u64 physical, u64 flags, u64 gen, int mirror_num,
		       u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);

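/* release all checksums that are still queued on the csum_list */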
static void scrub_free_csums(struct scrub_dev *sdev)
{
	while (!list_empty(&sdev->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

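/*
 * tear down a scrub context: drop the page references of a partially
 * filled bio, free the bio pool, the queued checksums and the context
 */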
static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
	int i;

	if (!sdev)
		return;

	/* this can happen when scrub is cancelled */
	if (sdev->curr != -1) {
		struct scrub_bio *sbio = sdev->bios[sdev->curr];

		for (i = 0; i < sbio->page_count; i++) {
			BUG_ON(!sbio->pagev[i]);
			BUG_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio = sdev->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sdev);
	kfree(sdev);
}

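/*
 * allocate and initialize the per-device scrub context; the scrub_bios
 * are chained into a free list via first_free/next_free
 */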
static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
	struct scrub_dev *sdev;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_bio;

	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
			      bio_get_nr_vecs(dev->bdev));
	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
	if (!sdev)
		goto nomem;
	sdev->dev = dev;
	sdev->pages_per_bio = pages_per_bio;
	sdev->curr = -1;
	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sdev->bios[i] = sbio;

		sbio->index = i;
		sbio->sdev = sdev;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_DEV-1)
			sdev->bios[i]->next_free = i + 1;
		else
			sdev->bios[i]->next_free = -1;
	}
	sdev->first_free = 0;
	sdev->nodesize = dev->dev_root->nodesize;
	sdev->leafsize = dev->dev_root->leafsize;
	sdev->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sdev->in_flight, 0);
	atomic_set(&sdev->fixup_cnt, 0);
	atomic_set(&sdev->cancel_req, 0);
	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sdev->csum_list);

	spin_lock_init(&sdev->list_lock);
	spin_lock_init(&sdev->stat_lock);
	init_waitqueue_head(&sdev->list_wait);
	return sdev;

nomem:
	scrub_free_dev(sdev);
	return ERR_PTR(-ENOMEM);
}

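/*
 * callback for iterate_extent_inodes: resolves one inode that references
 * the corrupted extent and prints a warning including the file path(s)
 */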
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, swarn->dev->name,
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, swarn->dev->name,
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

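/*
 * report a corrupted block: for metadata, print the tree backrefs; for
 * data extents, resolve and print the file paths that use the extent
 */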
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev = sblock->sdev->dev;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	int ret;
	u64 extent_item_pos;
	unsigned long ptr = 0;
	const int bufsize = 4096;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	BUG_ON(sblock->page_count < 1);
	swarn.sector = (sblock->pagev[0].physical) >> 9;
	swarn.logical = sblock->pagev[0].logical;
	swarn.errstr = errstr;
	swarn.dev = dev;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	btrfs_release_path(path);

	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
							&ref_root, &ref_level);
			printk(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical, dev->name,
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
	} else {
		swarn.path = path;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

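/*
 * callback for iterate_inodes_from_logical: force a read of the bad
 * mirror through the page cache so the generic read path can repair the
 * sector on the fly, or repair it directly if the page is already
 * uptodate and clean
 */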
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_mapping_tree *map_tree;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			ret = -EIO;
			goto out;
		}
		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					end, EXTENT_DAMAGED, 0, NULL);

		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

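/*
 * worker for the nodatasum case: triggers the repair via
 * scrub_fixup_readpage for each inode that references the bad logical
 * address and updates the corrected/uncorrectable counters
 */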
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_dev *sdev;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sdev = fixup->sdev;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.malloc_errors;
		spin_unlock(&sdev->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.uncorrectable_errors;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical, sdev->dev->name);
	}

	btrfs_free_path(path);
	kfree(fixup);

	/* see caller why we're pretending to be paused in the scrub counters */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sdev->fixup_cnt);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sdev->list_wait);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_dev *sdev = sblock_to_check->sdev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sdev->dev->dev_root->fs_info;
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0].logical;
	generation = sblock_to_check->pagev[0].generation;
	BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0].flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0].have_csum;
	csum = sblock_to_check->pagev[0].csum;

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.malloc_errors++;
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
				  csum, generation, sdev->csum_size);
	if (ret) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sdev->stat_lock);
		sdev->stat.unverified_errors++;
		spin_unlock(&sdev->stat_lock);

		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.csum_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(sdev->dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.verify_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sdev->readonly)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sdev = sdev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		/*
		 * increment scrubs_running to prevent cancel requests from
		 * completing as long as a fixup worker is running. we must also
		 * increment scrubs_paused to prevent deadlocking on pause
		 * requests used for transactions commits (as the worker uses a
		 * transaction context). it is safe to regard the fixup worker
		 * as paused for all matters practical. effectively, we only
		 * avoid cancellation requests from completing.
		 */
		mutex_lock(&fs_info->scrub_lock);
		atomic_inc(&fs_info->scrubs_running);
		atomic_inc(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		atomic_inc(&sdev->fixup_cnt);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		if (mirror_index == failed_mirror_index)
			continue;

		/* build and submit the bios, check checksums */
		ret = scrub_recheck_block(fs_info,
					  sblocks_for_recheck + mirror_index,
					  is_metadata, have_csum, csum,
					  generation, sdev->csum_size);
		if (ret)
			goto did_not_correct_error;
	}

	/*
	 * first try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other = sblocks_for_recheck +
						   mirror_index;

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			int force_write = is_metadata || have_csum;

			ret = scrub_repair_block_from_good_copy(sblock_bad,
								sblock_other,
								force_write);
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * in case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev + page_num;

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev +
							page_num;

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			ret = scrub_recheck_block(fs_info, sblock_bad,
						  is_metadata, have_csum, csum,
						  generation, sdev->csum_size);
			if (!ret && !sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sdev->stat_lock);
			sdev->stat.corrected_errors++;
			spin_unlock(&sdev->stat_lock);
			printk_ratelimited(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				(unsigned long long)logical, sdev->dev->name);
		}
	} else {
did_not_correct_error:
		spin_lock(&sdev->stat_lock);
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			(unsigned long long)logical, sdev->dev->name);
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
			     page_index++)
				if (sblock->pagev[page_index].page)
					__free_page(
						sblock->pagev[page_index].page);
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

static int scrub_setup_recheck_block(struct scrub_dev *sdev,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the three members sdev, ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
				      &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			page = sblock->pagev + page_index;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page) {
				spin_lock(&sdev->stat_lock);
				sdev->stat.malloc_errors++;
				spin_unlock(&sdev->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			sblock->page_count++;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
			       struct scrub_block *sblock, int is_metadata,
			       int have_csum, u8 *csum, u64 generation,
			       u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		int ret;
		struct scrub_page *page = sblock->pagev + page_num;
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		BUG_ON(!page->page);
		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return 0;
}

static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	struct btrfs_root *root = fs_info->extent_root;
	void *mapped_buffer;

	BUG_ON(!sblock->pagev[0].page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != le64_to_cpu(h->generation)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(root,
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(root, mapped_buffer, crc,
					      PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		BUG_ON(!sblock->pagev[page_num].page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

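/* rewrite every page of a bad block from the same-numbered mirror copy */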
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev + page_num;
	struct scrub_page *page_good = sblock_good->pagev + page_num;

	BUG_ON(sblock_bad->pagev[page_num].page == NULL);
	BUG_ON(sblock_good->pagev[page_num].page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

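/* dispatch verification by extent type and start the repair on failure */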
static void scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	BUG_ON(sblock->page_count < 1);
	flags = sblock->pagev[0].flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);
}

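/*
 * verify a data block against the checksum that was collected from the
 * csum tree when the extent was queued for scrubbing
 */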
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_dev *sdev = sblock->sdev;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sdev->dev->dev_root;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0].have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0].csum;
	page = sblock->pagev[0].page;
	buffer = kmap_atomic(page);

	len = sdev->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(root, buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sdev->csum_size))
		fail = 1;

	return fail;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_dev *sdev = sblock->sdev;
	struct btrfs_header *h;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0].page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sdev->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
		++fail;

	if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	BUG_ON(sdev->nodesize != sdev->leafsize);
	len = sdev->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_dev *sdev = sblock->sdev;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0].page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sdev->csum_size);

	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
		++fail_cor;

	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sdev->stat_lock);
		++sdev->stat.super_errors;
		spin_unlock(&sdev->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sdev->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			if (sblock->pagev[i].page)
				__free_page(sblock->pagev[i].page);
		kfree(sblock);
	}
}

static void scrub_submit(struct scrub_dev *sdev)
{
	struct scrub_bio *sbio;

	if (sdev->curr == -1)
		return;

	sbio = sdev->bios[sdev->curr];
	sdev->curr = -1;
	atomic_inc(&sdev->in_flight);

	btrfsic_submit_bio(READ, sbio->bio);
}

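/*
 * queue one page for reading; pages must be physically and logically
 * contiguous within a scrub_bio, otherwise the current bio is submitted
 * and a fresh one is started
 */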
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
				 struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sdev->curr == -1) {
		spin_lock(&sdev->list_lock);
		sdev->curr = sdev->first_free;
		if (sdev->curr != -1) {
			sdev->first_free = sdev->bios[sdev->curr]->next_free;
			sdev->bios[sdev->curr]->next_free = -1;
			sdev->bios[sdev->curr]->page_count = 0;
			spin_unlock(&sdev->list_lock);
		} else {
			spin_unlock(&sdev->list_lock);
			wait_event(sdev->list_wait, sdev->first_free != -1);
		}
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		bio = sbio->bio;
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sdev->dev->bdev;
		bio->bi_sector = spage->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_submit(sdev);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sdev);
		goto again;
	}

	scrub_block_get(sblock); /* one for the added page */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sdev->pages_per_bio)
		scrub_submit(sdev);

	return 0;
}

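/*
 * build a scrub_block for the given range (at most
 * SCRUB_MAX_PAGES_PER_BLOCK pages) and queue each page for reading
 */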
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
		       u64 physical, u64 flags, u64 gen, int mirror_num,
		       u8 *csum, int force)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.malloc_errors++;
		spin_unlock(&sdev->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sdev = sdev;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		u64 l = min_t(u64, len, PAGE_SIZE);

		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page) {
			spin_lock(&sdev->stat_lock);
			sdev->stat.malloc_errors++;
			spin_unlock(&sdev->stat_lock);
			while (index > 0) {
				index--;
				__free_page(sblock->pagev[index].page);
			}
			kfree(sblock);
			return -ENOMEM;
		}
		spage->sblock = sblock;
		spage->dev = sdev->dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sdev->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		len -= l;
		logical += l;
		physical += l;
	}

	BUG_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		int ret;

		ret = scrub_add_page_to_bio(sdev, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sdev);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_dev *sdev = sbio->sdev;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	if (sbio->err) {
		/* what is this good for??? */
		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bio->bi_phys_segments = 0;
		sbio->bio->bi_idx = 0;

		for (i = 0; i < sbio->page_count; i++) {
			struct bio_vec *bi;
			bi = &sbio->bio->bi_io_vec[i];
			bi->bv_offset = 0;
			bi->bv_len = PAGE_SIZE;
		}
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sdev->list_lock);
	sbio->next_free = sdev->first_free;
	sdev->first_free = sbio->index;
	spin_unlock(&sdev->list_lock);
	atomic_dec(&sdev->in_flight);
	wake_up(&sdev->list_wait);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen)
		scrub_handle_errored_block(sblock);
	else
		scrub_checksum(sblock);
}

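/*
 * find the checksum for the given logical offset in the pre-collected
 * csum_list; entries that lie entirely before the offset are discarded
 */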
static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;

	while (!list_empty(&sdev->csum_list)) {
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sdev->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sdev->sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
			u64 physical, u64 flags, u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sdev->sectorsize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.data_extents_scrubbed++;
		sdev->stat.data_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		BUG_ON(sdev->nodesize != sdev->leafsize);
		blocksize = sdev->nodesize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.tree_extents_scrubbed++;
		sdev->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else {
		blocksize = sdev->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sdev, logical, l, csum);
			if (have_csum == 0)
				++sdev->stat.no_csum;
		}
		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

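/*
 * scrub one stripe of the chunk: walk the extent tree (on the commit
 * root) within the stripe boundaries and scrub every extent found
 */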
static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
	struct map_lookup *map, int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;

	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for extent tree and csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sdev->list_wait,
		   atomic_read(&sdev->in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sdev->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sdev);
			wait_event(sdev->list_wait,
				   atomic_read(&sdev->in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sdev->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sdev, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   flags, generation, mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sdev->stat_lock);
		sdev->stat.last_physical = physical;
		spin_unlock(&sdev->stat_lock);
	}
	/* push queued extents */
	scrub_submit(sdev);

out:
	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
	u64 dev_offset)
{
	struct btrfs_mapping_tree *map_tree =
		&sdev->dev->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = -EINVAL;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev == sdev->dev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

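/*
 * iterate over all dev extents of the given device within [start, end)
 * and scrub the chunk stripe that each dev extent maps to
 */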
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = sdev->dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != sdev->dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset);
		btrfs_put_block_group(cache);
		if (ret)
			break;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_device *device = sdev->dev;
	struct btrfs_root *root = device->dev_root;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
			break;

		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
		if (ret)
			return ret;
	}
	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
			   fs_info->thread_pool_size, &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0)
		btrfs_stop_workers(&fs_info->scrub_workers);
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}

int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
		    struct btrfs_scrub_progress *progress, int readonly)
{
	struct scrub_dev *sdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(root->fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (root->nodesize != root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       root->nodesize, root->leafsize);
		return -EINVAL;
	}

	if (root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
		       root->sectorsize, (unsigned long long)PAGE_SIZE);
		return -EINVAL;
	}

	ret = scrub_workers_get(root);
	if (ret)
		return ret;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev || dev->missing) {
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}

	if (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -EINPROGRESS;
	}
	sdev = scrub_setup_dev(dev);
	if (IS_ERR(sdev)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return PTR_ERR(sdev);
	}
	sdev->readonly = readonly;
	dev->scrub_device = sdev;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	down_read(&fs_info->scrub_super_lock);
	ret = scrub_supers(sdev);
	up_read(&fs_info->scrub_super_lock);

	if (!ret)
		ret = scrub_enumerate_chunks(sdev, start, end);

	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);

	if (progress)
		memcpy(progress, &sdev->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_dev(sdev);
	scrub_workers_put(root);

	return ret;
}

void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}

int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel(struct btrfs_root *root)
{
	return __btrfs_scrub_cancel(root->fs_info);
}

int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct scrub_dev *sdev;

	mutex_lock(&fs_info->scrub_lock);
	sdev = dev->scrub_device;
	if (!sdev) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sdev->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(root, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}

int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_dev *sdev = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (dev)
		sdev = dev->scrub_device;
	if (sdev)
		memcpy(progress, &sdev->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
}