// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * a mirror can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;
/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
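
/*
 * Illustrative sizing, assuming 4KiB pages: SCRUB_PAGES_PER_RD_BIO = 32
 * pages is 32 * 4KiB = 128KiB per bio, and SCRUB_BIOS_PER_SCTX = 64 such
 * bios is 64 * 128KiB = 8MiB of outstanding I/O per device, matching the
 * comments on the definitions above.
 */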
struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};
/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}
static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
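
/*
 * Note on the pause protocol: scrub_pause_on() only announces that this
 * scrub is paused and wakes any waiter; scrub_pause_off() then blocks in
 * __scrub_blocked_if_needed() until scrub_pause_req drops to zero before
 * declaring itself unpaused. Calling the pair back to back, as
 * scrub_blocked_if_needed() does, is therefore a cheap "yield if someone
 * asked us to pause" point.
 */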
/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function.
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}
/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}
/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
		cache->full_stripe_len + cache->key.objectid;
	return ret;
}
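
/*
 * Worked example with illustrative numbers: for a block group starting at
 * key.objectid = 1GiB with full_stripe_len = 192KiB (three 64KiB data
 * stripes in RAID5), bytenr = 1GiB + 500KiB yields
 * div64_u64(500KiB, 192KiB) = 2, so the returned full stripe logical is
 * 1GiB + 2 * 192KiB = 1GiB + 384KiB. A power-of-2 round_down() could not
 * compute this, since 192KiB is not a power of two.
 */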
/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
 * So caller must call unlock_full_stripe() at the same context.
 *
 * Return <0 if we encounter an error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock full stripe without problem.
 * Return <0 for error
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
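
/*
 * Usage sketch: lock_full_stripe() and unlock_full_stripe() always travel
 * as a pair in the same context, with the bool threaded through:
 *
 *	bool full_stripe_locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 *	if (ret < 0)
 *		return ret;
 *	... repair work covering the full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
 *
 * This is exactly the pattern scrub_handle_errored_block() uses below.
 */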
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}
static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}
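
/*
 * Lifecycle note: the context starts with refs == 1 in scrub_setup_ctx()
 * below; each in-flight bio pins it via scrub_pending_bio_inc(), and the
 * matching scrub_pending_bio_dec() drops that pin through scrub_put_ctx(),
 * so the last completion may be the one that actually frees the context.
 */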
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->fs_info;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->pagev[0]->physical;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}
static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	bool full_stripe_locked;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * For RAID5/6, race can happen for a different device scrub thread.
	 * For data corruption, Parity and Data threads will both try
	 * to recover the data.
	 * Race can lead to doubly added csum error, or even unrecoverable
	 * error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ;mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].page_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
			int max_allowed = r->bbio->num_stripes -
						r->bbio->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].page_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (page_bad->io_error) {
			/* try to find no-io-error page in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	if (ret < 0)
		return ret;
	return 0;
}
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}
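
/*
 * Reading of the above: a RAID5 stripe can be read directly or rebuilt
 * once from parity (2 "mirrors"), RAID6 adds a second reconstruction path
 * via the Q stripe (3 "mirrors"), and for the mirrored profiles every
 * stripe in the bbio is a real copy.
 */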
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
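
/*
 * Worked example with made-up numbers: for raid_map = {1M, 1M + 64K, P, Q}
 * and mapped_length = 64K, logical = 1M + 80K misses stripe 0 but satisfies
 * raid_map[1] <= logical < raid_map[1] + 64K, so *stripe_index = 1 and
 * *stripe_offset = 16K into that stripe.
 */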
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &mapped_length, &bbio);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->sblock = sblock;
			page->flags = flags;
			page->generation = generation;
			page->logical = logical;
			page->have_csum = have_csum;
			if (have_csum)
				memcpy(page->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}
static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	int mirror_num;

	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	mirror_num = page->sblock->pagev[0]->mirror_num;
	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
				    page->recover->map_length,
				    mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}
static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_page *first_page = sblock->pagev[0];
	struct bio *bio;
	int page_num;

	/* All pages in sblock belong to the same stripe on the same device. */
	ASSERT(first_page->dev);
	if (!first_page->dev->bdev)
		goto out;

	bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
	bio_set_dev(bio, first_page->dev->bdev);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct scrub_page *page = sblock->pagev[page_num];

		WARN_ON(!page->page);
		bio_add_page(bio, page->page, PAGE_SIZE, 0);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (page_num = 0; page_num < sblock->page_count; page_num++)
		sblock->pagev[page_num]->io_error = 1;

	sblock->no_io_error_seen = 0;
}
/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, page->dev->bdev);

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		bio->bi_iter.bi_sector = page->physical >> 9;
		bio->bi_opf = REQ_OP_READ;

		if (btrfsic_submit_bio_wait(bio)) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}
static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}
static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, page_bad->dev->bdev);
		bio->bi_iter.bi_sector = page_bad->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&fs_info->dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&fs_info->dev_replace.num_write_errors);
	}
}
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		clear_page(mapped_buffer);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->page_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = sctx->wr_tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}
static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_disk);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(sbio->bio);
}
static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			 scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->status) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently,
	 * because this function only uses the return value
	 * instead of these stats values.
	 *
	 * Todo:
	 * always use stats
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}
*sblock
)
1778 struct scrub_ctx
*sctx
= sblock
->sctx
;
1779 u8 csum
[BTRFS_CSUM_SIZE
];
1787 BUG_ON(sblock
->page_count
< 1);
1788 if (!sblock
->pagev
[0]->have_csum
)
1791 on_disk_csum
= sblock
->pagev
[0]->csum
;
1792 page
= sblock
->pagev
[0]->page
;
1793 buffer
= kmap_atomic(page
);
1795 len
= sctx
->fs_info
->sectorsize
;
1798 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1800 crc
= btrfs_csum_data(buffer
, crc
, l
);
1801 kunmap_atomic(buffer
);
1806 BUG_ON(index
>= sblock
->page_count
);
1807 BUG_ON(!sblock
->pagev
[index
]->page
);
1808 page
= sblock
->pagev
[index
]->page
;
1809 buffer
= kmap_atomic(page
);
1812 btrfs_csum_final(crc
, csum
);
1813 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1814 sblock
->checksum_error
= 1;
1816 return sblock
->checksum_error
;
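
/*
 * Flow note for the loop above: with the common sectorsize == PAGE_SIZE ==
 * 4KiB the body runs exactly once; a (hypothetical) 16KiB sector would take
 * four iterations, re-kmapping one page per pass, before btrfs_csum_final()
 * folds the running crc32c into the digest compared against the on-disk
 * csum.
 */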
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}
static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_READ;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}
static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_status)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}
static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	scrub_block_put(sblock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			&length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
			scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u64 offset;
	u64 nsectors64;
	u32 nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = div_u64(offset, sectorsize);
	nsectors64 = div_u64(len, sectorsize);

	ASSERT(nsectors64 < UINT_MAX);
	nsectors = (u32)nsectors64;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}
static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
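/*
 * Called once every page of a block has completed. I/O errors are handed to
 * the repair path, otherwise the checksum decides whether the block can be
 * copied for dev-replace; for RAID 5/6 any bad sectors are recorded in the
 * parity error bitmap so parity is not rebuilt from corrupt data.
 */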
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * if the block has a checksum error, write via the repair
		 * mechanism in the dev replace case, otherwise write here
		 * in the dev replace case.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}
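/*
 * Look up the data checksum for @logical in the pre-loaded csum_list. Sums
 * that end before @logical can never match again and are dropped as the list
 * is walked in logical order. Returns 1 and copies the csum if found, 0 if
 * the sector has no checksum (e.g. nodatasum files).
 */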
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
	ASSERT(index < UINT_MAX);

	num_sectors = sum->len / sctx->fs_info->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
			u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
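/*
 * Like scrub_pages(), but every page is additionally linked into
 * sparity->spages and each takes an extra reference, so the parity code can
 * revisit the data sectors when the stripe's parity is checked and repaired.
 */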
static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}
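/*
 * A rough worked example for the rotation math below (layout assumed, not
 * taken from a real chunk): RAID5 with num_stripes = 3 has two data stripes
 * per row and the parity slot rotating by one device each row. For a given
 * physical offset, each candidate data-stripe slot i of the row is mapped to
 * a device index via rot = (stripe_nr % num_stripes) + i; the first slot that
 * lands on device @num yields the logical offset, otherwise the row holds
 * this device's parity and 1 is returned.
 */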
/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the left-most data stripe's logical offset.
 *
 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}
static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}
static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_status)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}
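/*
 * Kick off the actual parity check for a finished scrub_parity. Sectors that
 * already failed reading (ebitmap) are removed from dbitmap first; if nothing
 * readable is left there is no point in checking parity and the structure is
 * freed directly.
 */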
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}
static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}
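/*
 * Scrub one full RAID 5/6 stripe worth of parity: walk the extent tree for
 * all extents inside [logic_start, logic_end), mark their sectors in the
 * data bitmap, read and verify them, and finally let the dropped reference
 * trigger scrub_parity_check_and_repair() to validate the parity itself.
 */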
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					      extent_logical, &mapped_length, &bbio,
					      0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical + extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}
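/*
 * Scrub the portion of a chunk that lives on one device. The RAID profile of
 * the chunk decides the mapping: for RAID0 the device holds every
 * num_stripes-th stripe, so the logical position advances by
 * stripe_len * num_stripes for each stripe_len of physical progress, while
 * for RAID1/DUP logical and physical advance in lockstep and only mirror_num
 * differs. For RAID 5/6, rows holding parity on this device are handed to
 * scrub_raid56_parity() instead.
 */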
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div64_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for the extent tree and csum tree and wait
	 * for completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is a parity stripe */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, map, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}
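/*
 * Map a device extent back to its chunk and scrub each stripe of that chunk
 * that is stored on @scrub_dev at @dev_offset. The extent_map can be gone if
 * the block group was deleted underneath us, which is only an error when the
 * block group still exists.
 */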
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group_cache *cache,
					  int is_dev_replace)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
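/*
 * Walk all dev extents of the scrubbed device in the commit root and scrub
 * the corresponding chunks one by one. Each block group is set read-only
 * while it is scrubbed so that no new allocations race with repair writes,
 * and the dev-replace cursor is advanced once all I/O for it has completed.
 */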
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/* some chunks are removed but not committed to disk yet,
		 * continue scrubbing */
		if (!cache)
			goto skip;

		/*
		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(cache);
		if (!ret && is_dev_replace) {
			/*
			 * If we are doing a device replace wait for any tasks
			 * that started delalloc right before we set the block
			 * group to RO mode, as they might have just allocated
			 * an extent from it or decided they could do a nocow
			 * write. And if any such tasks did that, wait for their
			 * ordered extents to complete and then commit the
			 * current transaction, so that we can later see the new
			 * extent items in the extent tree - the ordered extents
			 * create delayed data references (for cow writes) when
			 * they complete, which will be run and insert the
			 * corresponding extent items into the extent tree when
			 * we commit the transaction they used when running
			 * inode.c:btrfs_finish_ordered_io(). We later use
			 * the commit root of the extent tree to find extents
			 * to copy from the srcdev into the tgtdev, and we don't
			 * want to miss any new extents.
			 */
			btrfs_wait_block_group_reservations(cache);
			btrfs_wait_nocow_writers(cache);
			ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
						       cache->key.objectid,
						       cache->key.offset);
			if (ret > 0) {
				struct btrfs_trans_handle *trans;

				trans = btrfs_join_transaction(root);
				if (IS_ERR(trans))
					ret = PTR_ERR(trans);
				else
					ret = btrfs_commit_transaction(trans);
				if (ret) {
					scrub_pause_off(fs_info);
					btrfs_put_block_group(cache);
					break;
				}
			}
		}
		scrub_pause_off(fs_info);

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC) {
			/*
			 * btrfs_inc_block_group_ro return -ENOSPC when it
			 * failed in creating new chunk for metadata.
			 * It is not a problem for scrub/replace, because
			 * metadata are always cowed, and our scrub paused
			 * commit_transactions.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_put_block_group(cache);
			break;
		}

		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache, is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    btrfs_block_group_used(&cache->item) == 0) {
			spin_unlock(&cache->lock);
			btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start workers if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
				flags, is_dev_replace ? 1 : max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
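/*
 * Entry point for both scrub and dev-replace. User space reaches this through
 * the scrub ioctls; a minimal sketch of a caller (user-space side, assuming
 * the definitions from <linux/btrfs.h>) looks roughly like:
 *
 *	struct btrfs_ioctl_scrub_args sa = { .devid = 1, .start = 0,
 *					     .end = (__u64)-1 };
 *	int fd = open("/mnt", O_RDONLY);
 *	ioctl(fd, BTRFS_IOC_SCRUB, &sa);
 *	// sa.progress then holds the btrfs_scrub_progress counters
 *
 * The size checks below reject configurations the scrub code cannot handle
 * before any state is set up.
 */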
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize,
			  BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
				 rcu_str_deref(dev->name));
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * checking @scrub_pause_req here, we can avoid
	 * race between committing transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}
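/*
 * Pause/resume are used around transaction commit: pause returns only after
 * every running scrub has parked in scrub_blocked_if_needed(), and continue
 * simply drops the request and wakes the waiters.
 */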
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}