/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - In case of a read error on files with nodatasum, map the file and read
 *    the extent to trigger a writeback of the good copy
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
struct scrub_bio;
struct scrub_page;
struct scrub_dev;
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_checksum(struct btrfs_work *work);
static int scrub_checksum_data(struct scrub_dev *sdev,
			       struct scrub_page *spag, void *buffer);
static int scrub_checksum_tree_block(struct scrub_dev *sdev,
				     struct scrub_page *spag, u64 logical,
				     void *buffer);
static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
static void scrub_fixup_end_io(struct bio *bio, int err);
static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
			  struct page *page);
static void scrub_fixup(struct scrub_bio *sbio, int ix);
#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
#define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */
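/*
 * The structures below form the scrub pipeline: a scrub_dev owns a fixed
 * pool of SCRUB_BIOS_PER_DEV scrub_bios, kept on a free list threaded
 * through first_free/next_free, and each scrub_bio carries per-page
 * verification data (extent flags, generation, checksum) in spag[]
 * alongside the pages of its struct bio.
 */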
struct scrub_page {
	u64			flags;  /* extent flags */
	u64			generation;
	int			mirror_num;
	int			have_csum;
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_dev	*sdev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	spag[SCRUB_PAGES_PER_BIO];
	u64			count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_dev {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
	struct btrfs_device	*dev;
	int			first_free;
	int			curr;
	atomic_t		in_flight;
	atomic_t		fixup_cnt;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};
struct scrub_fixup_nodatasum {
	struct scrub_dev	*sdev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};
static void scrub_free_csums(struct scrub_dev *sdev)
{
	while (!list_empty(&sdev->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static void scrub_free_bio(struct bio *bio)
{
	int i;
	struct page *last_page = NULL;

	if (!bio)
		return;

	for (i = 0; i < bio->bi_vcnt; ++i) {
		if (bio->bi_io_vec[i].bv_page == last_page)
			continue;
		last_page = bio->bi_io_vec[i].bv_page;
		__free_page(last_page);
	}
	bio_put(bio);
}
static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
	int i;

	if (!sdev)
		return;

	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio = sdev->bios[i];

		if (!sbio)
			break;

		scrub_free_bio(sbio->bio);
		kfree(sbio);
	}

	scrub_free_csums(sdev);
	kfree(sdev);
}
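/*
 * allocate a scrub_dev for @dev, including its pool of scrub_bios chained
 * into the free list. Returns ERR_PTR(-ENOMEM) on allocation failure;
 * partial allocations are undone via scrub_free_dev().
 */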
static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
	struct scrub_dev *sdev;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;

	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
	if (!sdev)
		goto nomem;
	sdev->dev = dev;
	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sdev->bios[i] = sbio;

		sbio->index = i;
		sbio->sdev = sdev;
		sbio->count = 0;
		sbio->work.func = scrub_checksum;

		if (i != SCRUB_BIOS_PER_DEV-1)
			sdev->bios[i]->next_free = i + 1;
		else
			sdev->bios[i]->next_free = -1;
	}
	sdev->first_free = 0;
	sdev->curr = -1;
	atomic_set(&sdev->in_flight, 0);
	atomic_set(&sdev->fixup_cnt, 0);
	atomic_set(&sdev->cancel_req, 0);
	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sdev->csum_list);

	spin_lock_init(&sdev->list_lock);
	spin_lock_init(&sdev->stat_lock);
	init_waitqueue_head(&sdev->list_wait);
	return sdev;

nomem:
	scrub_free_dev(sdev);
	return ERR_PTR(-ENOMEM);
}
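/*
 * callback for iterate_extent_inodes(): resolves one <inum, offset, root>
 * reference to the corrupted extent and prints one warning line per path
 * under which the inode can be reached.
 */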
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, swarn->dev->name,
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, swarn->dev->name,
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
				int ix)
{
	struct btrfs_device *dev = sbio->sdev->dev;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u32 item_size;
	int ret;
	u64 ref_root;
	u8 ref_level;
	unsigned long ptr = 0;
	const int bufsize = 4096;
	u64 extent_offset;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
	swarn.logical = sbio->logical + ix * PAGE_SIZE;
	swarn.errstr = errstr;
	swarn.dev = dev;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
	if (ret < 0)
		goto out;

	extent_offset = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
							&ref_root, &ref_level);
			printk(KERN_WARNING "%s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical, dev->name,
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
	} else {
		swarn.path = path;
		iterate_extent_inodes(fs_info, path, found_key.objectid,
					extent_offset,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}
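/*
 * callback for iterate_inodes_from_logical(): re-reads one page of an
 * affected file through the page cache so that the generic read path,
 * which knows all mirrors, can repair the bad sector on disk. Returns 1
 * to stop the iteration as soon as the page could be corrected.
 */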
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_mapping_tree *map_tree;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			ret = -EIO;
			goto out;
		}
		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}
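/*
 * worker body for the nodatasum fixup: runs in scrub_workers context with
 * a joined transaction and funnels the repair attempt through the regular
 * readpage path via scrub_fixup_readpage() above.
 */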
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_dev *sdev;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sdev = fixup->sdev;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.malloc_errors;
		spin_unlock(&sdev->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.uncorrectable_errors;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR "btrfs: unable to fixup "
					"(nodatasum) error at logical %llu\n",
					fixup->logical);
	}

	btrfs_free_path(path);
	kfree(fixup);

	/* see caller why we're pretending to be paused in the scrub counters */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sdev->fixup_cnt);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sdev->list_wait);
}
/*
 * scrub_recheck_error gets called when either verification of the page
 * failed or the bio failed to read, e.g. with EIO. In the latter case,
 * recheck_error gets called for every page in the bio, even though only
 * one may be bad
 */
static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
{
	struct scrub_dev *sdev = sbio->sdev;
	u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (sbio->err) {
		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
			if (scrub_fixup_check(sbio, ix) == 0)
				return 0;
		}
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sbio, ix);
	} else {
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sbio, ix);
	}

	spin_lock(&sdev->stat_lock);
	++sdev->stat.read_errors;
	spin_unlock(&sdev->stat_lock);

	scrub_fixup(sbio, ix);
	return 1;
}
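/*
 * re-verify a single page of an sbio after it has been re-read, picking
 * the data or tree-block checksum routine based on the recorded extent
 * flags. Returns 0 if the page now checks out.
 */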
static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
{
	int ret = 1;
	struct page *page;
	void *buffer;
	u64 flags = sbio->spag[ix].flags;

	page = sbio->bio->bi_io_vec[ix].bv_page;
	buffer = kmap_atomic(page, KM_USER0);
	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		ret = scrub_checksum_data(sbio->sdev,
					  sbio->spag + ix, buffer);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = scrub_checksum_tree_block(sbio->sdev,
						sbio->spag + ix,
						sbio->logical + ix * PAGE_SIZE,
						buffer);
	} else {
		WARN_ON(1);
	}
	kunmap_atomic(buffer, KM_USER0);

	return ret;
}
static void scrub_fixup_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
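/*
 * try to repair one bad page: data extents without a checksum are handed
 * off to the nodatasum fixup worker; for everything else the other
 * mirrors are read until one passes verification, and that copy is
 * written back in place unless the scrub is readonly.
 */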
static void scrub_fixup(struct scrub_bio *sbio, int ix)
{
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct btrfs_bio *bbio = NULL;
	struct scrub_fixup_nodatasum *fixup;
	u64 logical = sbio->logical + ix * PAGE_SIZE;
	u64 length;
	int i;
	int ret;
	DECLARE_COMPLETION_ONSTACK(complete);

	if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (sbio->spag[ix].have_csum == 0)) {
		fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
		if (!fixup)
			goto uncorrectable;
		fixup->sdev = sdev;
		fixup->logical = logical;
		fixup->root = fs_info->extent_root;
		fixup->mirror_num = sbio->spag[ix].mirror_num;
		/*
		 * increment scrubs_running to prevent cancel requests from
		 * completing as long as a fixup worker is running. we must also
		 * increment scrubs_paused to prevent deadlocking on pause
		 * requests used for transaction commits (as the worker uses a
		 * transaction context). it is safe to regard the fixup worker
		 * as paused for all practical matters. effectively, we only
		 * avoid cancellation requests from completing.
		 */
		mutex_lock(&fs_info->scrub_lock);
		atomic_inc(&fs_info->scrubs_running);
		atomic_inc(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		atomic_inc(&sdev->fixup_cnt);
		fixup->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work);
		return;
	}

	length = PAGE_SIZE;
	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < PAGE_SIZE) {
		printk(KERN_ERR
		       "scrub_fixup: btrfs_map_block failed us for %llu\n",
		       (unsigned long long)logical);
		WARN_ON(1);
		kfree(bbio);
		return;
	}

	if (bbio->num_stripes == 1)
		/* there aren't any replicas */
		goto uncorrectable;

	/*
	 * first find a good copy
	 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		if (i + 1 == sbio->spag[ix].mirror_num)
			continue;

		if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev,
				   bbio->stripes[i].physical >> 9,
				   sbio->bio->bi_io_vec[ix].bv_page)) {
			/* I/O-error, this is not a good copy */
			continue;
		}

		if (scrub_fixup_check(sbio, ix) == 0)
			break;
	}
	if (i == bbio->num_stripes)
		goto uncorrectable;

	if (!sdev->readonly) {
		/*
		 * bi_io_vec[ix].bv_page now contains good data, write it back
		 */
		if (scrub_fixup_io(WRITE, sdev->dev->bdev,
				   (sbio->physical + ix * PAGE_SIZE) >> 9,
				   sbio->bio->bi_io_vec[ix].bv_page)) {
			/* I/O-error, writeback failed, give up */
			goto uncorrectable;
		}
	}

	kfree(bbio);
	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

	printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n",
			       (unsigned long long)logical);
	return;

uncorrectable:
	kfree(bbio);
	spin_lock(&sdev->stat_lock);
	++sdev->stat.uncorrectable_errors;
	spin_unlock(&sdev->stat_lock);

	printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at "
				"logical %llu\n", (unsigned long long)logical);
}
static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	struct bio *bio = NULL;
	int ret;
	DECLARE_COMPLETION_ONSTACK(complete);

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = scrub_fixup_end_io;
	bio->bi_private = &complete;
	btrfsic_submit_bio(rw, bio);

	/* this will also unplug the queue */
	wait_for_completion(&complete);

	ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}
static void scrub_checksum(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_dev *sdev = sbio->sdev;
	struct page *page;
	void *buffer;
	int i;
	u64 flags;
	u64 logical;
	int ret;

	if (sbio->err) {
		ret = 0;
		for (i = 0; i < sbio->count; ++i)
			ret |= scrub_recheck_error(sbio, i);
		if (!ret) {
			spin_lock(&sdev->stat_lock);
			++sdev->stat.unverified_errors;
			spin_unlock(&sdev->stat_lock);
		}

		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bio->bi_phys_segments = 0;
		sbio->bio->bi_idx = 0;

		for (i = 0; i < sbio->count; i++) {
			struct bio_vec *bi;
			bi = &sbio->bio->bi_io_vec[i];
			bi->bv_offset = 0;
			bi->bv_len = PAGE_SIZE;
		}
		goto out;
	}
	for (i = 0; i < sbio->count; ++i) {
		page = sbio->bio->bi_io_vec[i].bv_page;
		buffer = kmap_atomic(page, KM_USER0);
		flags = sbio->spag[i].flags;
		logical = sbio->logical + i * PAGE_SIZE;
		ret = 0;
		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
		} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
							logical, buffer);
		} else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
			BUG_ON(i);
			(void)scrub_checksum_super(sbio, buffer);
		} else {
			WARN_ON(1);
		}
		kunmap_atomic(buffer, KM_USER0);
		if (ret) {
			ret = scrub_recheck_error(sbio, i);
			if (!ret) {
				spin_lock(&sdev->stat_lock);
				++sdev->stat.unverified_errors;
				spin_unlock(&sdev->stat_lock);
			}
		}
	}

out:
	scrub_free_bio(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sdev->list_lock);
	sbio->next_free = sdev->first_free;
	sdev->first_free = sbio->index;
	spin_unlock(&sdev->list_lock);
	atomic_dec(&sdev->in_flight);
	wake_up(&sdev->list_wait);
}
static int scrub_checksum_data(struct scrub_dev *sdev,
			       struct scrub_page *spag, void *buffer)
{
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sdev->dev->dev_root;

	if (!spag->have_csum)
		return 0;

	crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, spag->csum, sdev->csum_size))
		fail = 1;

	spin_lock(&sdev->stat_lock);
	++sdev->stat.data_extents_scrubbed;
	sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
	if (fail)
		++sdev->stat.csum_errors;
	spin_unlock(&sdev->stat_lock);

	return fail;
}
static int scrub_checksum_tree_block(struct scrub_dev *sdev,
				     struct scrub_page *spag, u64 logical,
				     void *buffer)
{
	struct btrfs_header *h;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	h = (struct btrfs_header *)buffer;

	if (logical != le64_to_cpu(h->bytenr))
		++fail;

	if (spag->generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
			      PAGE_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, h->csum, sdev->csum_size))
		++crc_fail;

	spin_lock(&sdev->stat_lock);
	++sdev->stat.tree_extents_scrubbed;
	sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
	if (crc_fail)
		++sdev->stat.csum_errors;
	if (fail)
		++sdev->stat.verify_errors;
	spin_unlock(&sdev->stat_lock);

	return fail || crc_fail;
}
static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
{
	struct btrfs_super_block *s;
	u64 logical;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;

	s = (struct btrfs_super_block *)buffer;
	logical = sbio->logical;

	if (logical != le64_to_cpu(s->bytenr))
		++fail;

	if (sbio->spag[0].generation != le64_to_cpu(s->generation))
		++fail;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
			      PAGE_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, s->csum, sbio->sdev->csum_size))
		++fail;

	if (fail) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sdev->stat_lock);
		++sdev->stat.super_errors;
		spin_unlock(&sdev->stat_lock);
	}

	return fail;
}
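/*
 * submit the bio currently being filled (sdev->curr), if any, and count
 * it as in flight.
 */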
static int scrub_submit(struct scrub_dev *sdev)
{
	struct scrub_bio *sbio;

	if (sdev->curr == -1)
		return 0;

	sbio = sdev->bios[sdev->curr];
	sbio->err = 0;
	sdev->curr = -1;
	atomic_inc(&sdev->in_flight);

	btrfsic_submit_bio(READ, sbio->bio);

	return 0;
}
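/*
 * queue one page for scrubbing: append it to the sbio currently being
 * filled, submitting first if the new page would not be physically and
 * logically contiguous with it, and submit once the bio is full or
 * @force is set (as it is for super blocks).
 */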
static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
		      u64 physical, u64 flags, u64 gen, int mirror_num,
		      u8 *csum, int force)
{
	struct scrub_bio *sbio;
	struct page *page;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sdev->curr == -1) {
		spin_lock(&sdev->list_lock);
		sdev->curr = sdev->first_free;
		if (sdev->curr != -1) {
			sdev->first_free = sdev->bios[sdev->curr]->next_free;
			sdev->bios[sdev->curr]->next_free = -1;
			sdev->bios[sdev->curr]->count = 0;
			spin_unlock(&sdev->list_lock);
		} else {
			spin_unlock(&sdev->list_lock);
			wait_event(sdev->list_wait, sdev->first_free != -1);
		}
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->count == 0) {
		struct bio *bio;

		sbio->physical = physical;
		sbio->logical = logical;
		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
		if (!bio)
			return -ENOMEM;

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sdev->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
		sbio->bio = bio;
	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
		ret = scrub_submit(sdev);
		if (ret)
			return ret;
		goto again;
	}
	sbio->spag[sbio->count].flags = flags;
	sbio->spag[sbio->count].generation = gen;
	sbio->spag[sbio->count].have_csum = 0;
	sbio->spag[sbio->count].mirror_num = mirror_num;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return -ENOMEM;

	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
	if (!ret) {
		__free_page(page);
		ret = scrub_submit(sdev);
		if (ret)
			return ret;
		goto again;
	}

	if (csum) {
		sbio->spag[sbio->count].have_csum = 1;
		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
	}
	++sbio->count;
	if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
		ret = scrub_submit(sdev);
		if (ret)
			return ret;
	}

	return 0;
}
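/*
 * look up the data checksum for @logical in the csum_list that
 * scrub_stripe() prefetched, discarding entries that end before it.
 * Returns 1 and copies the checksum to @csum when one is found.
 */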
static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;
	u32 sectorsize = sdev->dev->dev_root->sectorsize;

	while (!list_empty(&sdev->csum_list)) {
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sdev->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
			u64 physical, u64 flags, u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];

	while (len) {
		u64 l = min_t(u64, len, PAGE_SIZE);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sdev, logical, l, csum);
			if (have_csum == 0)
				++sdev->stat.no_csum;
		}
		ret = scrub_page(sdev, logical, l, physical, flags, gen,
				 mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}
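/*
 * scrub all extents of the single stripe of the chunk described by @map
 * that lives on sdev's device; num selects the stripe, base is the chunk
 * start. The RAID layout determines the per-iteration logical increment
 * and the mirror number that is passed down for repair.
 */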
static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
	struct map_lookup *map, int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;

	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sdev->list_wait,
		   atomic_read(&sdev->in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sdev->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sdev);
			wait_event(sdev->list_wait,
				   atomic_read(&sdev->in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sdev->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sdev, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   flags, generation, mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sdev->stat_lock);
		sdev->stat.last_physical = physical;
		spin_unlock(&sdev->stat_lock);
	}
	/* push queued extents */
	scrub_submit(sdev);

out:
	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
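/*
 * map the chunk at @chunk_offset and scrub every stripe of it that is
 * stored on sdev's device.
 */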
static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
{
	struct btrfs_mapping_tree *map_tree =
		&sdev->dev->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = -EINVAL;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev == sdev->dev) {
			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
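/*
 * walk all dev extents of sdev's device in the device tree and scrub
 * the chunks they belong to, as far as they intersect [start, end).
 */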
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = sdev->dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != sdev->dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
				  chunk_offset, length);
		btrfs_put_block_group(cache);
		if (ret)
			break;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_device *device = sdev->dev;
	struct btrfs_root *root = device->dev_root;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
				 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
		if (ret)
			return ret;
	}
	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
			   fs_info->thread_pool_size, &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0)
		btrfs_stop_workers(&fs_info->scrub_workers);
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}
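/*
 * main entry point, presumably reached via the scrub ioctl: scrub the
 * device @devid in the range [start, end]. Only one scrub per device may
 * run at a time; a second request returns -EINPROGRESS.
 */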
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
		    struct btrfs_scrub_progress *progress, int readonly)
{
	struct scrub_dev *sdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(root->fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (root->sectorsize != PAGE_SIZE ||
	    root->sectorsize != root->leafsize ||
	    root->sectorsize != root->nodesize) {
		printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
		return -EINVAL;
	}

	ret = scrub_workers_get(root);
	if (ret)
		return ret;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev || dev->missing) {
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}

	if (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -EINPROGRESS;
	}
	sdev = scrub_setup_dev(dev);
	if (IS_ERR(sdev)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return PTR_ERR(sdev);
	}
	sdev->readonly = readonly;
	dev->scrub_device = sdev;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	down_read(&fs_info->scrub_super_lock);
	ret = scrub_supers(sdev);
	up_read(&fs_info->scrub_super_lock);

	if (!ret)
		ret = scrub_enumerate_chunks(sdev, start, end);

	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);

	if (progress)
		memcpy(progress, &sdev->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_dev(sdev);
	scrub_workers_put(root);

	return ret;
}
int btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
	return 0;
}
int btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
	return 0;
}

int btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
	return 0;
}
int btrfs_scrub_cancel(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct scrub_dev *sdev;

	mutex_lock(&fs_info->scrub_lock);
	sdev = dev->scrub_device;
	if (!sdev) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sdev->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(root, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_dev *sdev = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (dev)
		sdev = dev->scrub_device;
	if (sdev)
		memcpy(progress, &sdev->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
}