/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ordered-data.h"
#include "transaction.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * available.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
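/*
 * Editorial note on the arithmetic behind the comments above (assuming
 * 4K pages): 32 pages * 4K = 128K per bio, and 64 bios * 128K = 8MB that
 * may be in flight per device at any time.
 */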
/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
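/*
 * Editorial note (assuming 4K pages): 16 pages * 4K = 64K, which matches
 * the "64k per node/leaf/sector" comment above and stays within the
 * BTRFS_STRIPE_LEN limit mentioned there.
 */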
struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	unsigned int		mirror_num:8;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};
struct scrub_bio {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	unsigned int		header_error:1;
	unsigned int		checksum_error:1;
	unsigned int		no_io_error_seen:1;
	unsigned int		generation_error:1; /* also sets header_error */

	/* The following is for the data used to check parity */
	/* It is for the data with checksum */
	unsigned int		data_corrected:1;
	struct btrfs_work	work;
};
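/*
 * Editorial note on the data structures above: a scrub_page covers one
 * PAGE_SIZE unit of data, a scrub_block groups up to
 * SCRUB_MAX_PAGES_PER_BLOCK pages that are verified as one checksummed
 * unit (data sector or metadata node), and a scrub_bio batches up to
 * SCRUB_PAGES_PER_RD_BIO/SCRUB_PAGES_PER_WR_BIO pages into one I/O.
 */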
/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t		refs;
};
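/*
 * Lifetime note (editorial, based on the helpers below): the context is
 * created with refs == 1 in scrub_setup_ctx(), every in-flight bio or
 * transaction worker takes an extra reference via scrub_pending_bio_inc() /
 * scrub_pending_trans_workers_inc(), and scrub_put_ctx() frees the context
 * once the last reference is dropped.
 */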
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};
struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};
struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}
static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
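/*
 * Editorial note: scrub_pause_on() advertises this task as paused and wakes
 * any waiter, while scrub_pause_off() blocks in __scrub_blocked_if_needed()
 * until a pending pause request (fs_info->scrub_pause_req) has been cleared.
 * Calling the two back to back therefore gives a waiting pause requester
 * (e.g. a transaction commit) a chance to run before scrubbing continues.
 */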
/*
 * used for workers that require transaction commits (i.e., for the
 * copy_nocow_pages worker)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The check of the @scrubs_running == @scrubs_paused condition
	 * inside wait_event() is not an atomic operation, which means we
	 * may inc/dec @scrubs_running/@scrubs_paused at any time. Wake up
	 * @scrub_pause_wait as often as we can so that a blocked
	 * transaction commit waits as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}
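/*
 * Editorial note: the matching decrements happen in
 * scrub_pending_trans_workers_dec() below once the worker has finished,
 * followed by wakeups on both scrub_pause_wait and the context's list_wait.
 */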
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack
void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int ret;

	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
522 static int scrub_print_warning_inode(u64 inum
, u64 offset
, u64 root
,
529 struct extent_buffer
*eb
;
530 struct btrfs_inode_item
*inode_item
;
531 struct scrub_warning
*swarn
= warn_ctx
;
532 struct btrfs_fs_info
*fs_info
= swarn
->dev
->dev_root
->fs_info
;
533 struct inode_fs_paths
*ipath
= NULL
;
534 struct btrfs_root
*local_root
;
535 struct btrfs_key root_key
;
536 struct btrfs_key key
;
538 root_key
.objectid
= root
;
539 root_key
.type
= BTRFS_ROOT_ITEM_KEY
;
540 root_key
.offset
= (u64
)-1;
541 local_root
= btrfs_read_fs_root_no_name(fs_info
, &root_key
);
542 if (IS_ERR(local_root
)) {
543 ret
= PTR_ERR(local_root
);
548 * this makes the path point to (inum INODE_ITEM ioff)
551 key
.type
= BTRFS_INODE_ITEM_KEY
;
554 ret
= btrfs_search_slot(NULL
, local_root
, &key
, swarn
->path
, 0, 0);
556 btrfs_release_path(swarn
->path
);
560 eb
= swarn
->path
->nodes
[0];
561 inode_item
= btrfs_item_ptr(eb
, swarn
->path
->slots
[0],
562 struct btrfs_inode_item
);
563 isize
= btrfs_inode_size(eb
, inode_item
);
564 nlink
= btrfs_inode_nlink(eb
, inode_item
);
565 btrfs_release_path(swarn
->path
);
567 ipath
= init_ipath(4096, local_root
, swarn
->path
);
569 ret
= PTR_ERR(ipath
);
573 ret
= paths_from_inode(inum
, ipath
);
	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
582 for (i
= 0; i
< ipath
->fspath
->elem_cnt
; ++i
)
583 printk_in_rcu(KERN_WARNING
"BTRFS: %s at logical %llu on dev "
584 "%s, sector %llu, root %llu, inode %llu, offset %llu, "
585 "length %llu, links %u (path: %s)\n", swarn
->errstr
,
586 swarn
->logical
, rcu_str_deref(swarn
->dev
->name
),
587 (unsigned long long)swarn
->sector
, root
, inum
, offset
,
588 min(isize
- offset
, (u64
)PAGE_SIZE
), nlink
,
589 (char *)(unsigned long)ipath
->fspath
->val
[i
]);
595 printk_in_rcu(KERN_WARNING
"BTRFS: %s at logical %llu on dev "
596 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
597 "resolving failed with ret=%d\n", swarn
->errstr
,
598 swarn
->logical
, rcu_str_deref(swarn
->dev
->name
),
599 (unsigned long long)swarn
->sector
, root
, inum
, offset
, ret
);
605 static void scrub_print_warning(const char *errstr
, struct scrub_block
*sblock
)
607 struct btrfs_device
*dev
;
608 struct btrfs_fs_info
*fs_info
;
609 struct btrfs_path
*path
;
610 struct btrfs_key found_key
;
611 struct extent_buffer
*eb
;
612 struct btrfs_extent_item
*ei
;
613 struct scrub_warning swarn
;
614 unsigned long ptr
= 0;
622 WARN_ON(sblock
->page_count
< 1);
623 dev
= sblock
->pagev
[0]->dev
;
624 fs_info
= sblock
->sctx
->dev_root
->fs_info
;
626 path
= btrfs_alloc_path();
630 swarn
.sector
= (sblock
->pagev
[0]->physical
) >> 9;
631 swarn
.logical
= sblock
->pagev
[0]->logical
;
632 swarn
.errstr
= errstr
;
635 ret
= extent_from_logical(fs_info
, swarn
.logical
, path
, &found_key
,
640 extent_item_pos
= swarn
.logical
- found_key
.objectid
;
641 swarn
.extent_item_size
= found_key
.offset
;
644 ei
= btrfs_item_ptr(eb
, path
->slots
[0], struct btrfs_extent_item
);
645 item_size
= btrfs_item_size_nr(eb
, path
->slots
[0]);
647 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
649 ret
= tree_backref_for_extent(&ptr
, eb
, &found_key
, ei
,
650 item_size
, &ref_root
,
652 printk_in_rcu(KERN_WARNING
653 "BTRFS: %s at logical %llu on dev %s, "
654 "sector %llu: metadata %s (level %d) in tree "
655 "%llu\n", errstr
, swarn
.logical
,
656 rcu_str_deref(dev
->name
),
657 (unsigned long long)swarn
.sector
,
658 ref_level
? "node" : "leaf",
659 ret
< 0 ? -1 : ref_level
,
660 ret
< 0 ? -1 : ref_root
);
662 btrfs_release_path(path
);
664 btrfs_release_path(path
);
667 iterate_extent_inodes(fs_info
, found_key
.objectid
,
669 scrub_print_warning_inode
, &swarn
);
673 btrfs_free_path(path
);
676 static int scrub_fixup_readpage(u64 inum
, u64 offset
, u64 root
, void *fixup_ctx
)
678 struct page
*page
= NULL
;
680 struct scrub_fixup_nodatasum
*fixup
= fixup_ctx
;
683 struct btrfs_key key
;
684 struct inode
*inode
= NULL
;
685 struct btrfs_fs_info
*fs_info
;
686 u64 end
= offset
+ PAGE_SIZE
- 1;
687 struct btrfs_root
*local_root
;
691 key
.type
= BTRFS_ROOT_ITEM_KEY
;
692 key
.offset
= (u64
)-1;
694 fs_info
= fixup
->root
->fs_info
;
695 srcu_index
= srcu_read_lock(&fs_info
->subvol_srcu
);
697 local_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
698 if (IS_ERR(local_root
)) {
699 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
700 return PTR_ERR(local_root
);
703 key
.type
= BTRFS_INODE_ITEM_KEY
;
706 inode
= btrfs_iget(fs_info
->sb
, &key
, local_root
, NULL
);
707 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
709 return PTR_ERR(inode
);
711 index
= offset
>> PAGE_CACHE_SHIFT
;
713 page
= find_or_create_page(inode
->i_mapping
, index
, GFP_NOFS
);
719 if (PageUptodate(page
)) {
720 if (PageDirty(page
)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
740 ret
= repair_io_failure(inode
, offset
, PAGE_SIZE
,
741 fixup
->logical
, page
,
742 offset
- page_offset(page
),
748 * we need to get good data first. the general readpage path
749 * will call repair_io_failure for us, we just have to make
750 * sure we read the bad mirror.
752 ret
= set_extent_bits(&BTRFS_I(inode
)->io_tree
, offset
, end
,
753 EXTENT_DAMAGED
, GFP_NOFS
);
755 /* set_extent_bits should give proper error */
762 ret
= extent_read_full_page(&BTRFS_I(inode
)->io_tree
, page
,
765 wait_on_page_locked(page
);
767 corrected
= !test_range_bit(&BTRFS_I(inode
)->io_tree
, offset
,
768 end
, EXTENT_DAMAGED
, 0, NULL
);
770 clear_extent_bits(&BTRFS_I(inode
)->io_tree
, offset
, end
,
771 EXTENT_DAMAGED
, GFP_NOFS
);
783 if (ret
== 0 && corrected
) {
785 * we only need to call readpage for one of the inodes belonging
786 * to this extent. so make iterate_extent_inodes stop
794 static void scrub_fixup_nodatasum(struct btrfs_work
*work
)
797 struct scrub_fixup_nodatasum
*fixup
;
798 struct scrub_ctx
*sctx
;
799 struct btrfs_trans_handle
*trans
= NULL
;
800 struct btrfs_path
*path
;
801 int uncorrectable
= 0;
803 fixup
= container_of(work
, struct scrub_fixup_nodatasum
, work
);
806 path
= btrfs_alloc_path();
808 spin_lock(&sctx
->stat_lock
);
809 ++sctx
->stat
.malloc_errors
;
810 spin_unlock(&sctx
->stat_lock
);
815 trans
= btrfs_join_transaction(fixup
->root
);
	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
830 ret
= iterate_inodes_from_logical(fixup
->logical
, fixup
->root
->fs_info
,
831 path
, scrub_fixup_readpage
,
839 spin_lock(&sctx
->stat_lock
);
840 ++sctx
->stat
.corrected_errors
;
841 spin_unlock(&sctx
->stat_lock
);
844 if (trans
&& !IS_ERR(trans
))
845 btrfs_end_transaction(trans
, fixup
->root
);
847 spin_lock(&sctx
->stat_lock
);
848 ++sctx
->stat
.uncorrectable_errors
;
849 spin_unlock(&sctx
->stat_lock
);
850 btrfs_dev_replace_stats_inc(
851 &sctx
->dev_root
->fs_info
->dev_replace
.
852 num_uncorrectable_read_errors
);
853 printk_ratelimited_in_rcu(KERN_ERR
"BTRFS: "
854 "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
855 fixup
->logical
, rcu_str_deref(fixup
->dev
->name
));
858 btrfs_free_path(path
);
861 scrub_pending_trans_workers_dec(sctx
);
864 static inline void scrub_get_recover(struct scrub_recover
*recover
)
866 atomic_inc(&recover
->refs
);
869 static inline void scrub_put_recover(struct scrub_recover
*recover
)
871 if (atomic_dec_and_test(&recover
->refs
)) {
872 btrfs_put_bbio(recover
->bbio
);
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
885 static int scrub_handle_errored_block(struct scrub_block
*sblock_to_check
)
887 struct scrub_ctx
*sctx
= sblock_to_check
->sctx
;
888 struct btrfs_device
*dev
;
889 struct btrfs_fs_info
*fs_info
;
893 unsigned int failed_mirror_index
;
894 unsigned int is_metadata
;
895 unsigned int have_csum
;
897 struct scrub_block
*sblocks_for_recheck
; /* holds one for each mirror */
898 struct scrub_block
*sblock_bad
;
903 static DEFINE_RATELIMIT_STATE(_rs
, DEFAULT_RATELIMIT_INTERVAL
,
904 DEFAULT_RATELIMIT_BURST
);
906 BUG_ON(sblock_to_check
->page_count
< 1);
907 fs_info
= sctx
->dev_root
->fs_info
;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
919 length
= sblock_to_check
->page_count
* PAGE_SIZE
;
920 logical
= sblock_to_check
->pagev
[0]->logical
;
921 generation
= sblock_to_check
->pagev
[0]->generation
;
922 BUG_ON(sblock_to_check
->pagev
[0]->mirror_num
< 1);
923 failed_mirror_index
= sblock_to_check
->pagev
[0]->mirror_num
- 1;
924 is_metadata
= !(sblock_to_check
->pagev
[0]->flags
&
925 BTRFS_EXTENT_FLAG_DATA
);
926 have_csum
= sblock_to_check
->pagev
[0]->have_csum
;
927 csum
= sblock_to_check
->pagev
[0]->csum
;
928 dev
= sblock_to_check
->pagev
[0]->dev
;
930 if (sctx
->is_dev_replace
&& !is_metadata
&& !have_csum
) {
931 sblocks_for_recheck
= NULL
;
	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
964 sblocks_for_recheck
= kcalloc(BTRFS_MAX_MIRRORS
,
965 sizeof(*sblocks_for_recheck
), GFP_NOFS
);
966 if (!sblocks_for_recheck
) {
967 spin_lock(&sctx
->stat_lock
);
968 sctx
->stat
.malloc_errors
++;
969 sctx
->stat
.read_errors
++;
970 sctx
->stat
.uncorrectable_errors
++;
971 spin_unlock(&sctx
->stat_lock
);
972 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
976 /* setup the context, map the logical blocks and alloc the pages */
977 ret
= scrub_setup_recheck_block(sblock_to_check
, sblocks_for_recheck
);
979 spin_lock(&sctx
->stat_lock
);
980 sctx
->stat
.read_errors
++;
981 sctx
->stat
.uncorrectable_errors
++;
982 spin_unlock(&sctx
->stat_lock
);
983 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
986 BUG_ON(failed_mirror_index
>= BTRFS_MAX_MIRRORS
);
987 sblock_bad
= sblocks_for_recheck
+ failed_mirror_index
;
989 /* build and submit the bios for the failed mirror, check checksums */
990 scrub_recheck_block(fs_info
, sblock_bad
, is_metadata
, have_csum
,
991 csum
, generation
, sctx
->csum_size
, 1);
993 if (!sblock_bad
->header_error
&& !sblock_bad
->checksum_error
&&
994 sblock_bad
->no_io_error_seen
) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the reason)
		 */
1003 spin_lock(&sctx
->stat_lock
);
1004 sctx
->stat
.unverified_errors
++;
1005 sblock_to_check
->data_corrected
= 1;
1006 spin_unlock(&sctx
->stat_lock
);
1008 if (sctx
->is_dev_replace
)
1009 scrub_write_block_to_dev_replace(sblock_bad
);
1013 if (!sblock_bad
->no_io_error_seen
) {
1014 spin_lock(&sctx
->stat_lock
);
1015 sctx
->stat
.read_errors
++;
1016 spin_unlock(&sctx
->stat_lock
);
1017 if (__ratelimit(&_rs
))
1018 scrub_print_warning("i/o error", sblock_to_check
);
1019 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
1020 } else if (sblock_bad
->checksum_error
) {
1021 spin_lock(&sctx
->stat_lock
);
1022 sctx
->stat
.csum_errors
++;
1023 spin_unlock(&sctx
->stat_lock
);
1024 if (__ratelimit(&_rs
))
1025 scrub_print_warning("checksum error", sblock_to_check
);
1026 btrfs_dev_stat_inc_and_print(dev
,
1027 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1028 } else if (sblock_bad
->header_error
) {
1029 spin_lock(&sctx
->stat_lock
);
1030 sctx
->stat
.verify_errors
++;
1031 spin_unlock(&sctx
->stat_lock
);
1032 if (__ratelimit(&_rs
))
1033 scrub_print_warning("checksum/header error",
1035 if (sblock_bad
->generation_error
)
1036 btrfs_dev_stat_inc_and_print(dev
,
1037 BTRFS_DEV_STAT_GENERATION_ERRS
);
1039 btrfs_dev_stat_inc_and_print(dev
,
1040 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1043 if (sctx
->readonly
) {
1044 ASSERT(!sctx
->is_dev_replace
);
1048 if (!is_metadata
&& !have_csum
) {
1049 struct scrub_fixup_nodatasum
*fixup_nodatasum
;
1051 WARN_ON(sctx
->is_dev_replace
);
1056 * !is_metadata and !have_csum, this means that the data
1057 * might not be COW'ed, that it might be modified
1058 * concurrently. The general strategy to work on the
1059 * commit root does not help in the case when COW is not
1062 fixup_nodatasum
= kzalloc(sizeof(*fixup_nodatasum
), GFP_NOFS
);
1063 if (!fixup_nodatasum
)
1064 goto did_not_correct_error
;
1065 fixup_nodatasum
->sctx
= sctx
;
1066 fixup_nodatasum
->dev
= dev
;
1067 fixup_nodatasum
->logical
= logical
;
1068 fixup_nodatasum
->root
= fs_info
->extent_root
;
1069 fixup_nodatasum
->mirror_num
= failed_mirror_index
+ 1;
1070 scrub_pending_trans_workers_inc(sctx
);
1071 btrfs_init_work(&fixup_nodatasum
->work
, btrfs_scrub_helper
,
1072 scrub_fixup_nodatasum
, NULL
, NULL
);
1073 btrfs_queue_work(fs_info
->scrub_workers
,
1074 &fixup_nodatasum
->work
);
	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
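	/*
	 * In short (editorial summary): prefer copying the whole block from
	 * a mirror that read cleanly and passed its checksum; only fall back
	 * to the page-by-page repair further below when no such mirror
	 * exists.
	 */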
1093 for (mirror_index
= 0;
1094 mirror_index
< BTRFS_MAX_MIRRORS
&&
1095 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1097 struct scrub_block
*sblock_other
;
1099 if (mirror_index
== failed_mirror_index
)
1101 sblock_other
= sblocks_for_recheck
+ mirror_index
;
1103 /* build and submit the bios, check checksums */
1104 scrub_recheck_block(fs_info
, sblock_other
, is_metadata
,
1105 have_csum
, csum
, generation
,
1106 sctx
->csum_size
, 0);
1108 if (!sblock_other
->header_error
&&
1109 !sblock_other
->checksum_error
&&
1110 sblock_other
->no_io_error_seen
) {
1111 if (sctx
->is_dev_replace
) {
1112 scrub_write_block_to_dev_replace(sblock_other
);
1113 goto corrected_error
;
1115 ret
= scrub_repair_block_from_good_copy(
1116 sblock_bad
, sblock_other
);
1118 goto corrected_error
;
1123 if (sblock_bad
->no_io_error_seen
&& !sctx
->is_dev_replace
)
1124 goto did_not_correct_error
;
	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
1151 for (page_num
= 0; page_num
< sblock_bad
->page_count
;
1153 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1154 struct scrub_block
*sblock_other
= NULL
;
1156 /* skip no-io-error page in scrub */
1157 if (!page_bad
->io_error
&& !sctx
->is_dev_replace
)
1160 /* try to find no-io-error page in mirrors */
1161 if (page_bad
->io_error
) {
1162 for (mirror_index
= 0;
1163 mirror_index
< BTRFS_MAX_MIRRORS
&&
1164 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1166 if (!sblocks_for_recheck
[mirror_index
].
1167 pagev
[page_num
]->io_error
) {
1168 sblock_other
= sblocks_for_recheck
+
1177 if (sctx
->is_dev_replace
) {
1179 * did not find a mirror to fetch the page
1180 * from. scrub_write_page_to_dev_replace()
1181 * handles this case (page->io_error), by
1182 * filling the block with zeros before
1183 * submitting the write request
1186 sblock_other
= sblock_bad
;
1188 if (scrub_write_page_to_dev_replace(sblock_other
,
1190 btrfs_dev_replace_stats_inc(
1192 fs_info
->dev_replace
.
1196 } else if (sblock_other
) {
1197 ret
= scrub_repair_page_from_good_copy(sblock_bad
,
1201 page_bad
->io_error
= 0;
1207 if (success
&& !sctx
->is_dev_replace
) {
1208 if (is_metadata
|| have_csum
) {
1210 * need to verify the checksum now that all
1211 * sectors on disk are repaired (the write
1212 * request for data to be repaired is on its way).
1213 * Just be lazy and use scrub_recheck_block()
1214 * which re-reads the data before the checksum
1215 * is verified, but most likely the data comes out
1216 * of the page cache.
1218 scrub_recheck_block(fs_info
, sblock_bad
,
1219 is_metadata
, have_csum
, csum
,
1220 generation
, sctx
->csum_size
, 1);
1221 if (!sblock_bad
->header_error
&&
1222 !sblock_bad
->checksum_error
&&
1223 sblock_bad
->no_io_error_seen
)
1224 goto corrected_error
;
1226 goto did_not_correct_error
;
1229 spin_lock(&sctx
->stat_lock
);
1230 sctx
->stat
.corrected_errors
++;
1231 sblock_to_check
->data_corrected
= 1;
1232 spin_unlock(&sctx
->stat_lock
);
1233 printk_ratelimited_in_rcu(KERN_ERR
1234 "BTRFS: fixed up error at logical %llu on dev %s\n",
1235 logical
, rcu_str_deref(dev
->name
));
1238 did_not_correct_error
:
1239 spin_lock(&sctx
->stat_lock
);
1240 sctx
->stat
.uncorrectable_errors
++;
1241 spin_unlock(&sctx
->stat_lock
);
1242 printk_ratelimited_in_rcu(KERN_ERR
1243 "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1244 logical
, rcu_str_deref(dev
->name
));
1248 if (sblocks_for_recheck
) {
1249 for (mirror_index
= 0; mirror_index
< BTRFS_MAX_MIRRORS
;
1251 struct scrub_block
*sblock
= sblocks_for_recheck
+
1253 struct scrub_recover
*recover
;
1256 for (page_index
= 0; page_index
< sblock
->page_count
;
1258 sblock
->pagev
[page_index
]->sblock
= NULL
;
1259 recover
= sblock
->pagev
[page_index
]->recover
;
1261 scrub_put_recover(recover
);
1262 sblock
->pagev
[page_index
]->recover
=
1265 scrub_page_put(sblock
->pagev
[page_index
]);
1268 kfree(sblocks_for_recheck
);
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}
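/*
 * Editorial note (assumption based on the RAID5/6 special-casing above):
 * the value returned here is the number of different ways a block can be
 * read back for the recheck loop - each RAID5/6 parity rebuild path counts
 * as an additional "mirror", everything else exposes one mirror per stripe.
 */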
1284 static inline void scrub_stripe_index_and_offset(u64 logical
, u64 map_type
,
1287 int nstripes
, int mirror
,
1293 if (map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
1295 for (i
= 0; i
< nstripes
; i
++) {
1296 if (raid_map
[i
] == RAID6_Q_STRIPE
||
1297 raid_map
[i
] == RAID5_P_STRIPE
)
1300 if (logical
>= raid_map
[i
] &&
1301 logical
< raid_map
[i
] + mapped_length
)
1306 *stripe_offset
= logical
- raid_map
[i
];
1308 /* The other RAID type */
1309 *stripe_index
= mirror
;
1314 static int scrub_setup_recheck_block(struct scrub_block
*original_sblock
,
1315 struct scrub_block
*sblocks_for_recheck
)
1317 struct scrub_ctx
*sctx
= original_sblock
->sctx
;
1318 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
1319 u64 length
= original_sblock
->page_count
* PAGE_SIZE
;
1320 u64 logical
= original_sblock
->pagev
[0]->logical
;
1321 struct scrub_recover
*recover
;
1322 struct btrfs_bio
*bbio
;
1333 * note: the two members refs and outstanding_pages
1334 * are not used (and not set) in the blocks that are used for
1335 * the recheck procedure
1338 while (length
> 0) {
1339 sublen
= min_t(u64
, length
, PAGE_SIZE
);
1340 mapped_length
= sublen
;
1344 * with a length of PAGE_SIZE, each returned stripe
1345 * represents one mirror
1347 ret
= btrfs_map_sblock(fs_info
, REQ_GET_READ_MIRRORS
, logical
,
1348 &mapped_length
, &bbio
, 0, 1);
1349 if (ret
|| !bbio
|| mapped_length
< sublen
) {
1350 btrfs_put_bbio(bbio
);
1354 recover
= kzalloc(sizeof(struct scrub_recover
), GFP_NOFS
);
1356 btrfs_put_bbio(bbio
);
1360 atomic_set(&recover
->refs
, 1);
1361 recover
->bbio
= bbio
;
1362 recover
->map_length
= mapped_length
;
1364 BUG_ON(page_index
>= SCRUB_PAGES_PER_RD_BIO
);
1366 nmirrors
= min(scrub_nr_raid_mirrors(bbio
), BTRFS_MAX_MIRRORS
);
1368 for (mirror_index
= 0; mirror_index
< nmirrors
;
1370 struct scrub_block
*sblock
;
1371 struct scrub_page
*page
;
1373 sblock
= sblocks_for_recheck
+ mirror_index
;
1374 sblock
->sctx
= sctx
;
1375 page
= kzalloc(sizeof(*page
), GFP_NOFS
);
1378 spin_lock(&sctx
->stat_lock
);
1379 sctx
->stat
.malloc_errors
++;
1380 spin_unlock(&sctx
->stat_lock
);
1381 scrub_put_recover(recover
);
1384 scrub_page_get(page
);
1385 sblock
->pagev
[page_index
] = page
;
1386 page
->logical
= logical
;
1388 scrub_stripe_index_and_offset(logical
,
1397 page
->physical
= bbio
->stripes
[stripe_index
].physical
+
1399 page
->dev
= bbio
->stripes
[stripe_index
].dev
;
1401 BUG_ON(page_index
>= original_sblock
->page_count
);
1402 page
->physical_for_dev_replace
=
1403 original_sblock
->pagev
[page_index
]->
1404 physical_for_dev_replace
;
1405 /* for missing devices, dev->bdev is NULL */
1406 page
->mirror_num
= mirror_index
+ 1;
1407 sblock
->page_count
++;
1408 page
->page
= alloc_page(GFP_NOFS
);
1412 scrub_get_recover(recover
);
1413 page
->recover
= recover
;
1415 scrub_put_recover(recover
);
1424 struct scrub_bio_ret
{
1425 struct completion event
;
1429 static void scrub_bio_wait_endio(struct bio
*bio
)
1431 struct scrub_bio_ret
*ret
= bio
->bi_private
;
1433 ret
->error
= bio
->bi_error
;
1434 complete(&ret
->event
);
1437 static inline int scrub_is_page_on_raid56(struct scrub_page
*page
)
1439 return page
->recover
&&
1440 (page
->recover
->bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
);
1443 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info
*fs_info
,
1445 struct scrub_page
*page
)
1447 struct scrub_bio_ret done
;
1450 init_completion(&done
.event
);
1452 bio
->bi_iter
.bi_sector
= page
->logical
>> 9;
1453 bio
->bi_private
= &done
;
1454 bio
->bi_end_io
= scrub_bio_wait_endio
;
1456 ret
= raid56_parity_recover(fs_info
->fs_root
, bio
, page
->recover
->bbio
,
1457 page
->recover
->map_length
,
1458 page
->mirror_num
, 0);
1462 wait_for_completion(&done
.event
);
/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
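/*
 * Editorial note: the caller, scrub_handle_errored_block(), relies on the
 * per-page I/O error state gathered here to decide, mirror by mirror, which
 * pages can still be used to repair the failed block.
 */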
1476 static void scrub_recheck_block(struct btrfs_fs_info
*fs_info
,
1477 struct scrub_block
*sblock
, int is_metadata
,
1478 int have_csum
, u8
*csum
, u64 generation
,
1479 u16 csum_size
, int retry_failed_mirror
)
1483 sblock
->no_io_error_seen
= 1;
1484 sblock
->header_error
= 0;
1485 sblock
->checksum_error
= 0;
1487 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1489 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1491 if (page
->dev
->bdev
== NULL
) {
1493 sblock
->no_io_error_seen
= 0;
1497 WARN_ON(!page
->page
);
1498 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1501 sblock
->no_io_error_seen
= 0;
1504 bio
->bi_bdev
= page
->dev
->bdev
;
1506 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1507 if (!retry_failed_mirror
&& scrub_is_page_on_raid56(page
)) {
1508 if (scrub_submit_raid56_bio_wait(fs_info
, bio
, page
))
1509 sblock
->no_io_error_seen
= 0;
1511 bio
->bi_iter
.bi_sector
= page
->physical
>> 9;
1513 if (btrfsic_submit_bio_wait(READ
, bio
))
1514 sblock
->no_io_error_seen
= 0;
1520 if (sblock
->no_io_error_seen
)
1521 scrub_recheck_block_checksum(fs_info
, sblock
, is_metadata
,
1522 have_csum
, csum
, generation
,
1528 static inline int scrub_check_fsid(u8 fsid
[],
1529 struct scrub_page
*spage
)
1531 struct btrfs_fs_devices
*fs_devices
= spage
->dev
->fs_devices
;
1534 ret
= memcmp(fsid
, fs_devices
->fsid
, BTRFS_UUID_SIZE
);
1538 static void scrub_recheck_block_checksum(struct btrfs_fs_info
*fs_info
,
1539 struct scrub_block
*sblock
,
1540 int is_metadata
, int have_csum
,
1541 const u8
*csum
, u64 generation
,
1545 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1547 void *mapped_buffer
;
1549 WARN_ON(!sblock
->pagev
[0]->page
);
1551 struct btrfs_header
*h
;
1553 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1554 h
= (struct btrfs_header
*)mapped_buffer
;
1556 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
) ||
1557 !scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]) ||
1558 memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1560 sblock
->header_error
= 1;
1561 } else if (generation
!= btrfs_stack_header_generation(h
)) {
1562 sblock
->header_error
= 1;
1563 sblock
->generation_error
= 1;
1570 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1573 for (page_num
= 0;;) {
1574 if (page_num
== 0 && is_metadata
)
1575 crc
= btrfs_csum_data(
1576 ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
,
1577 crc
, PAGE_SIZE
- BTRFS_CSUM_SIZE
);
1579 crc
= btrfs_csum_data(mapped_buffer
, crc
, PAGE_SIZE
);
1581 kunmap_atomic(mapped_buffer
);
1583 if (page_num
>= sblock
->page_count
)
1585 WARN_ON(!sblock
->pagev
[page_num
]->page
);
1587 mapped_buffer
= kmap_atomic(sblock
->pagev
[page_num
]->page
);
1590 btrfs_csum_final(crc
, calculated_csum
);
1591 if (memcmp(calculated_csum
, csum
, csum_size
))
1592 sblock
->checksum_error
= 1;
1595 static int scrub_repair_block_from_good_copy(struct scrub_block
*sblock_bad
,
1596 struct scrub_block
*sblock_good
)
1601 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1604 ret_sub
= scrub_repair_page_from_good_copy(sblock_bad
,
1614 static int scrub_repair_page_from_good_copy(struct scrub_block
*sblock_bad
,
1615 struct scrub_block
*sblock_good
,
1616 int page_num
, int force_write
)
1618 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1619 struct scrub_page
*page_good
= sblock_good
->pagev
[page_num
];
1621 BUG_ON(page_bad
->page
== NULL
);
1622 BUG_ON(page_good
->page
== NULL
);
1623 if (force_write
|| sblock_bad
->header_error
||
1624 sblock_bad
->checksum_error
|| page_bad
->io_error
) {
1628 if (!page_bad
->dev
->bdev
) {
1629 printk_ratelimited(KERN_WARNING
"BTRFS: "
1630 "scrub_repair_page_from_good_copy(bdev == NULL) "
1631 "is unexpected!\n");
1635 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1638 bio
->bi_bdev
= page_bad
->dev
->bdev
;
1639 bio
->bi_iter
.bi_sector
= page_bad
->physical
>> 9;
1641 ret
= bio_add_page(bio
, page_good
->page
, PAGE_SIZE
, 0);
1642 if (PAGE_SIZE
!= ret
) {
1647 if (btrfsic_submit_bio_wait(WRITE
, bio
)) {
1648 btrfs_dev_stat_inc_and_print(page_bad
->dev
,
1649 BTRFS_DEV_STAT_WRITE_ERRS
);
1650 btrfs_dev_replace_stats_inc(
1651 &sblock_bad
->sctx
->dev_root
->fs_info
->
1652 dev_replace
.num_write_errors
);
1662 static void scrub_write_block_to_dev_replace(struct scrub_block
*sblock
)
1667 * This block is used for the check of the parity on the source device,
1668 * so the data needn't be written into the destination device.
1670 if (sblock
->sparity
)
1673 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1676 ret
= scrub_write_page_to_dev_replace(sblock
, page_num
);
1678 btrfs_dev_replace_stats_inc(
1679 &sblock
->sctx
->dev_root
->fs_info
->dev_replace
.
1684 static int scrub_write_page_to_dev_replace(struct scrub_block
*sblock
,
1687 struct scrub_page
*spage
= sblock
->pagev
[page_num
];
1689 BUG_ON(spage
->page
== NULL
);
1690 if (spage
->io_error
) {
1691 void *mapped_buffer
= kmap_atomic(spage
->page
);
1693 memset(mapped_buffer
, 0, PAGE_CACHE_SIZE
);
1694 flush_dcache_page(spage
->page
);
1695 kunmap_atomic(mapped_buffer
);
1697 return scrub_add_page_to_wr_bio(sblock
->sctx
, spage
);
1700 static int scrub_add_page_to_wr_bio(struct scrub_ctx
*sctx
,
1701 struct scrub_page
*spage
)
1703 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1704 struct scrub_bio
*sbio
;
1707 mutex_lock(&wr_ctx
->wr_lock
);
1709 if (!wr_ctx
->wr_curr_bio
) {
1710 wr_ctx
->wr_curr_bio
= kzalloc(sizeof(*wr_ctx
->wr_curr_bio
),
1712 if (!wr_ctx
->wr_curr_bio
) {
1713 mutex_unlock(&wr_ctx
->wr_lock
);
1716 wr_ctx
->wr_curr_bio
->sctx
= sctx
;
1717 wr_ctx
->wr_curr_bio
->page_count
= 0;
1719 sbio
= wr_ctx
->wr_curr_bio
;
1720 if (sbio
->page_count
== 0) {
1723 sbio
->physical
= spage
->physical_for_dev_replace
;
1724 sbio
->logical
= spage
->logical
;
1725 sbio
->dev
= wr_ctx
->tgtdev
;
1728 bio
= btrfs_io_bio_alloc(GFP_NOFS
, wr_ctx
->pages_per_wr_bio
);
1730 mutex_unlock(&wr_ctx
->wr_lock
);
1736 bio
->bi_private
= sbio
;
1737 bio
->bi_end_io
= scrub_wr_bio_end_io
;
1738 bio
->bi_bdev
= sbio
->dev
->bdev
;
1739 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
1741 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1742 spage
->physical_for_dev_replace
||
1743 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1745 scrub_wr_submit(sctx
);
1749 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1750 if (ret
!= PAGE_SIZE
) {
1751 if (sbio
->page_count
< 1) {
1754 mutex_unlock(&wr_ctx
->wr_lock
);
1757 scrub_wr_submit(sctx
);
1761 sbio
->pagev
[sbio
->page_count
] = spage
;
1762 scrub_page_get(spage
);
1764 if (sbio
->page_count
== wr_ctx
->pages_per_wr_bio
)
1765 scrub_wr_submit(sctx
);
1766 mutex_unlock(&wr_ctx
->wr_lock
);
1771 static void scrub_wr_submit(struct scrub_ctx
*sctx
)
1773 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1774 struct scrub_bio
*sbio
;
1776 if (!wr_ctx
->wr_curr_bio
)
1779 sbio
= wr_ctx
->wr_curr_bio
;
1780 wr_ctx
->wr_curr_bio
= NULL
;
1781 WARN_ON(!sbio
->bio
->bi_bdev
);
1782 scrub_pending_bio_inc(sctx
);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured */
	btrfsic_submit_bio(WRITE, sbio->bio);
1790 static void scrub_wr_bio_end_io(struct bio
*bio
)
1792 struct scrub_bio
*sbio
= bio
->bi_private
;
1793 struct btrfs_fs_info
*fs_info
= sbio
->dev
->dev_root
->fs_info
;
1795 sbio
->err
= bio
->bi_error
;
1798 btrfs_init_work(&sbio
->work
, btrfs_scrubwrc_helper
,
1799 scrub_wr_bio_end_io_worker
, NULL
, NULL
);
1800 btrfs_queue_work(fs_info
->scrub_wr_completion_workers
, &sbio
->work
);
1803 static void scrub_wr_bio_end_io_worker(struct btrfs_work
*work
)
1805 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
1806 struct scrub_ctx
*sctx
= sbio
->sctx
;
1809 WARN_ON(sbio
->page_count
> SCRUB_PAGES_PER_WR_BIO
);
1811 struct btrfs_dev_replace
*dev_replace
=
1812 &sbio
->sctx
->dev_root
->fs_info
->dev_replace
;
1814 for (i
= 0; i
< sbio
->page_count
; i
++) {
1815 struct scrub_page
*spage
= sbio
->pagev
[i
];
1817 spage
->io_error
= 1;
1818 btrfs_dev_replace_stats_inc(&dev_replace
->
1823 for (i
= 0; i
< sbio
->page_count
; i
++)
1824 scrub_page_put(sbio
->pagev
[i
]);
1828 scrub_pending_bio_dec(sctx
);
1831 static int scrub_checksum(struct scrub_block
*sblock
)
1836 WARN_ON(sblock
->page_count
< 1);
1837 flags
= sblock
->pagev
[0]->flags
;
1839 if (flags
& BTRFS_EXTENT_FLAG_DATA
)
1840 ret
= scrub_checksum_data(sblock
);
1841 else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)
1842 ret
= scrub_checksum_tree_block(sblock
);
1843 else if (flags
& BTRFS_EXTENT_FLAG_SUPER
)
1844 (void)scrub_checksum_super(sblock
);
1848 scrub_handle_errored_block(sblock
);
1853 static int scrub_checksum_data(struct scrub_block
*sblock
)
1855 struct scrub_ctx
*sctx
= sblock
->sctx
;
1856 u8 csum
[BTRFS_CSUM_SIZE
];
1865 BUG_ON(sblock
->page_count
< 1);
1866 if (!sblock
->pagev
[0]->have_csum
)
1869 on_disk_csum
= sblock
->pagev
[0]->csum
;
1870 page
= sblock
->pagev
[0]->page
;
1871 buffer
= kmap_atomic(page
);
1873 len
= sctx
->sectorsize
;
1876 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1878 crc
= btrfs_csum_data(buffer
, crc
, l
);
1879 kunmap_atomic(buffer
);
1884 BUG_ON(index
>= sblock
->page_count
);
1885 BUG_ON(!sblock
->pagev
[index
]->page
);
1886 page
= sblock
->pagev
[index
]->page
;
1887 buffer
= kmap_atomic(page
);
1890 btrfs_csum_final(crc
, csum
);
1891 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1897 static int scrub_checksum_tree_block(struct scrub_block
*sblock
)
1899 struct scrub_ctx
*sctx
= sblock
->sctx
;
1900 struct btrfs_header
*h
;
1901 struct btrfs_root
*root
= sctx
->dev_root
;
1902 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1903 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1904 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1906 void *mapped_buffer
;
1915 BUG_ON(sblock
->page_count
< 1);
1916 page
= sblock
->pagev
[0]->page
;
1917 mapped_buffer
= kmap_atomic(page
);
1918 h
= (struct btrfs_header
*)mapped_buffer
;
1919 memcpy(on_disk_csum
, h
->csum
, sctx
->csum_size
);
1922 * we don't use the getter functions here, as we
1923 * a) don't have an extent buffer and
1924 * b) the page is already kmapped
1927 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
))
1930 if (sblock
->pagev
[0]->generation
!= btrfs_stack_header_generation(h
))
1933 if (!scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]))
1936 if (memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1940 len
= sctx
->nodesize
- BTRFS_CSUM_SIZE
;
1941 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1942 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1945 u64 l
= min_t(u64
, len
, mapped_size
);
1947 crc
= btrfs_csum_data(p
, crc
, l
);
1948 kunmap_atomic(mapped_buffer
);
1953 BUG_ON(index
>= sblock
->page_count
);
1954 BUG_ON(!sblock
->pagev
[index
]->page
);
1955 page
= sblock
->pagev
[index
]->page
;
1956 mapped_buffer
= kmap_atomic(page
);
1957 mapped_size
= PAGE_SIZE
;
1961 btrfs_csum_final(crc
, calculated_csum
);
1962 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1965 return fail
|| crc_fail
;
1968 static int scrub_checksum_super(struct scrub_block
*sblock
)
1970 struct btrfs_super_block
*s
;
1971 struct scrub_ctx
*sctx
= sblock
->sctx
;
1972 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1973 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1975 void *mapped_buffer
;
1984 BUG_ON(sblock
->page_count
< 1);
1985 page
= sblock
->pagev
[0]->page
;
1986 mapped_buffer
= kmap_atomic(page
);
1987 s
= (struct btrfs_super_block
*)mapped_buffer
;
1988 memcpy(on_disk_csum
, s
->csum
, sctx
->csum_size
);
1990 if (sblock
->pagev
[0]->logical
!= btrfs_super_bytenr(s
))
1993 if (sblock
->pagev
[0]->generation
!= btrfs_super_generation(s
))
1996 if (!scrub_check_fsid(s
->fsid
, sblock
->pagev
[0]))
1999 len
= BTRFS_SUPER_INFO_SIZE
- BTRFS_CSUM_SIZE
;
2000 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
2001 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
2004 u64 l
= min_t(u64
, len
, mapped_size
);
2006 crc
= btrfs_csum_data(p
, crc
, l
);
2007 kunmap_atomic(mapped_buffer
);
2012 BUG_ON(index
>= sblock
->page_count
);
2013 BUG_ON(!sblock
->pagev
[index
]->page
);
2014 page
= sblock
->pagev
[index
]->page
;
2015 mapped_buffer
= kmap_atomic(page
);
2016 mapped_size
= PAGE_SIZE
;
2020 btrfs_csum_final(crc
, calculated_csum
);
2021 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}
2041 return fail_cor
+ fail_gen
;
2044 static void scrub_block_get(struct scrub_block
*sblock
)
2046 atomic_inc(&sblock
->refs
);
2049 static void scrub_block_put(struct scrub_block
*sblock
)
2051 if (atomic_dec_and_test(&sblock
->refs
)) {
2054 if (sblock
->sparity
)
2055 scrub_parity_put(sblock
->sparity
);
2057 for (i
= 0; i
< sblock
->page_count
; i
++)
2058 scrub_page_put(sblock
->pagev
[i
]);
2063 static void scrub_page_get(struct scrub_page
*spage
)
2065 atomic_inc(&spage
->refs
);
2068 static void scrub_page_put(struct scrub_page
*spage
)
2070 if (atomic_dec_and_test(&spage
->refs
)) {
2072 __free_page(spage
->page
);
2077 static void scrub_submit(struct scrub_ctx
*sctx
)
2079 struct scrub_bio
*sbio
;
2081 if (sctx
->curr
== -1)
2084 sbio
= sctx
->bios
[sctx
->curr
];
2086 scrub_pending_bio_inc(sctx
);
2087 btrfsic_submit_bio(READ
, sbio
->bio
);
2090 static int scrub_add_page_to_rd_bio(struct scrub_ctx
*sctx
,
2091 struct scrub_page
*spage
)
2093 struct scrub_block
*sblock
= spage
->sblock
;
2094 struct scrub_bio
*sbio
;
2099 * grab a fresh bio or wait for one to become available
2101 while (sctx
->curr
== -1) {
2102 spin_lock(&sctx
->list_lock
);
2103 sctx
->curr
= sctx
->first_free
;
2104 if (sctx
->curr
!= -1) {
2105 sctx
->first_free
= sctx
->bios
[sctx
->curr
]->next_free
;
2106 sctx
->bios
[sctx
->curr
]->next_free
= -1;
2107 sctx
->bios
[sctx
->curr
]->page_count
= 0;
2108 spin_unlock(&sctx
->list_lock
);
2110 spin_unlock(&sctx
->list_lock
);
2111 wait_event(sctx
->list_wait
, sctx
->first_free
!= -1);
2114 sbio
= sctx
->bios
[sctx
->curr
];
2115 if (sbio
->page_count
== 0) {
2118 sbio
->physical
= spage
->physical
;
2119 sbio
->logical
= spage
->logical
;
2120 sbio
->dev
= spage
->dev
;
2123 bio
= btrfs_io_bio_alloc(GFP_NOFS
, sctx
->pages_per_rd_bio
);
2129 bio
->bi_private
= sbio
;
2130 bio
->bi_end_io
= scrub_bio_end_io
;
2131 bio
->bi_bdev
= sbio
->dev
->bdev
;
2132 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
2134 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
2136 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
2138 sbio
->dev
!= spage
->dev
) {
2143 sbio
->pagev
[sbio
->page_count
] = spage
;
2144 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
2145 if (ret
!= PAGE_SIZE
) {
2146 if (sbio
->page_count
< 1) {
2155 scrub_block_get(sblock
); /* one for the page added to the bio */
2156 atomic_inc(&sblock
->outstanding_pages
);
2158 if (sbio
->page_count
== sctx
->pages_per_rd_bio
)
2164 static void scrub_missing_raid56_end_io(struct bio
*bio
)
2166 struct scrub_block
*sblock
= bio
->bi_private
;
2167 struct btrfs_fs_info
*fs_info
= sblock
->sctx
->dev_root
->fs_info
;
2170 sblock
->no_io_error_seen
= 0;
2172 btrfs_queue_work(fs_info
->scrub_workers
, &sblock
->work
);
2175 static void scrub_missing_raid56_worker(struct btrfs_work
*work
)
2177 struct scrub_block
*sblock
= container_of(work
, struct scrub_block
, work
);
2178 struct scrub_ctx
*sctx
= sblock
->sctx
;
2179 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
2180 unsigned int is_metadata
;
2181 unsigned int have_csum
;
2185 struct btrfs_device
*dev
;
2187 is_metadata
= !(sblock
->pagev
[0]->flags
& BTRFS_EXTENT_FLAG_DATA
);
2188 have_csum
= sblock
->pagev
[0]->have_csum
;
2189 csum
= sblock
->pagev
[0]->csum
;
2190 generation
= sblock
->pagev
[0]->generation
;
2191 logical
= sblock
->pagev
[0]->logical
;
2192 dev
= sblock
->pagev
[0]->dev
;
2194 if (sblock
->no_io_error_seen
) {
2195 scrub_recheck_block_checksum(fs_info
, sblock
, is_metadata
,
2196 have_csum
, csum
, generation
,
2200 if (!sblock
->no_io_error_seen
) {
2201 spin_lock(&sctx
->stat_lock
);
2202 sctx
->stat
.read_errors
++;
2203 spin_unlock(&sctx
->stat_lock
);
		printk_ratelimited_in_rcu(KERN_ERR
			"BTRFS: I/O error rebuilding logical %llu for dev %s\n",
			logical, rcu_str_deref(dev->name));
2207 } else if (sblock
->header_error
|| sblock
->checksum_error
) {
2208 spin_lock(&sctx
->stat_lock
);
2209 sctx
->stat
.uncorrectable_errors
++;
2210 spin_unlock(&sctx
->stat_lock
);
2211 printk_ratelimited_in_rcu(KERN_ERR
2212 "BTRFS: failed to rebuild valid logical %llu for dev %s\n",
2213 logical
, rcu_str_deref(dev
->name
));
2215 scrub_write_block_to_dev_replace(sblock
);
2218 scrub_block_put(sblock
);
2220 if (sctx
->is_dev_replace
&&
2221 atomic_read(&sctx
->wr_ctx
.flush_all_writes
)) {
2222 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
2223 scrub_wr_submit(sctx
);
2224 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
2227 scrub_pending_bio_dec(sctx
);
2230 static void scrub_missing_raid56_pages(struct scrub_block
*sblock
)
2232 struct scrub_ctx
*sctx
= sblock
->sctx
;
2233 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
2234 u64 length
= sblock
->page_count
* PAGE_SIZE
;
2235 u64 logical
= sblock
->pagev
[0]->logical
;
2236 struct btrfs_bio
*bbio
;
2238 struct btrfs_raid_bio
*rbio
;
2242 ret
= btrfs_map_sblock(fs_info
, REQ_GET_READ_MIRRORS
, logical
, &length
,
2244 if (ret
|| !bbio
|| !bbio
->raid_map
)
2247 if (WARN_ON(!sctx
->is_dev_replace
||
2248 !(bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
))) {
2250 * We shouldn't be scrubbing a missing device. Even for dev
2251 * replace, we should only get here for RAID 5/6. We either
2252 * managed to mount something with no mirrors remaining or
2253 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2258 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 0);
2262 bio
->bi_iter
.bi_sector
= logical
>> 9;
2263 bio
->bi_private
= sblock
;
2264 bio
->bi_end_io
= scrub_missing_raid56_end_io
;
2266 rbio
= raid56_alloc_missing_rbio(sctx
->dev_root
, bio
, bbio
, length
);
2270 for (i
= 0; i
< sblock
->page_count
; i
++) {
2271 struct scrub_page
*spage
= sblock
->pagev
[i
];
2273 raid56_add_scrub_pages(rbio
, spage
->page
, spage
->logical
);
2276 btrfs_init_work(&sblock
->work
, btrfs_scrub_helper
,
2277 scrub_missing_raid56_worker
, NULL
, NULL
);
2278 scrub_block_get(sblock
);
2279 scrub_pending_bio_inc(sctx
);
2280 raid56_submit_missing_rbio(rbio
);
2286 btrfs_put_bbio(bbio
);
2287 spin_lock(&sctx
->stat_lock
);
2288 sctx
->stat
.malloc_errors
++;
2289 spin_unlock(&sctx
->stat_lock
);
2292 static int scrub_pages(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
2293 u64 physical
, struct btrfs_device
*dev
, u64 flags
,
2294 u64 gen
, int mirror_num
, u8
*csum
, int force
,
2295 u64 physical_for_dev_replace
)
2297 struct scrub_block
*sblock
;
2300 sblock
= kzalloc(sizeof(*sblock
), GFP_NOFS
);
2302 spin_lock(&sctx
->stat_lock
);
2303 sctx
->stat
.malloc_errors
++;
2304 spin_unlock(&sctx
->stat_lock
);
2308 /* one ref inside this function, plus one for each page added to a bio later on */
2310 atomic_set(&sblock
->refs
, 1);
2311 sblock
->sctx
= sctx
;
2312 sblock
->no_io_error_seen
= 1;
2314 for (index
= 0; len
> 0; index
++) {
2315 struct scrub_page
*spage
;
2316 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
2318 spage
= kzalloc(sizeof(*spage
), GFP_NOFS
);
2321 spin_lock(&sctx
->stat_lock
);
2322 sctx
->stat
.malloc_errors
++;
2323 spin_unlock(&sctx
->stat_lock
);
2324 scrub_block_put(sblock
);
2327 BUG_ON(index
>= SCRUB_MAX_PAGES_PER_BLOCK
);
2328 scrub_page_get(spage
);
2329 sblock
->pagev
[index
] = spage
;
2330 spage
->sblock
= sblock
;
2332 spage
->flags
= flags
;
2333 spage
->generation
= gen
;
2334 spage
->logical
= logical
;
2335 spage
->physical
= physical
;
2336 spage
->physical_for_dev_replace
= physical_for_dev_replace
;
2337 spage
->mirror_num
= mirror_num
;
2339 spage
->have_csum
= 1;
2340 memcpy(spage
->csum
, csum
, sctx
->csum_size
);
2342 spage
->have_csum
= 0;
2344 sblock
->page_count
++;
2345 spage
->page
= alloc_page(GFP_NOFS
);
2351 physical_for_dev_replace
+= l
;
2354 WARN_ON(sblock
->page_count
== 0);
2357 * This case should only be hit for RAID 5/6 device replace. See
2358 * the comment in scrub_missing_raid56_pages() for details.
2360 scrub_missing_raid56_pages(sblock
);
2362 for (index
= 0; index
< sblock
->page_count
; index
++) {
2363 struct scrub_page
*spage
= sblock
->pagev
[index
];
2366 ret
= scrub_add_page_to_rd_bio(sctx
, spage
);
2368 scrub_block_put(sblock
);
2377 /* last one frees, either here or in bio completion for last page */
2378 scrub_block_put(sblock
);
static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u32 offset;
	int nsectors;
	int sectorsize = sparity->sctx->dev_root->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div_u64_rem(start, sparity->stripe_len, &offset);
	offset /= sectorsize;
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}
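/*
 * Illustrative walk-through (added note, not from the original source;
 * the values are assumptions): with stripe_len = 64K and a 4K
 * sectorsize, sparity->nsectors is 16.  Marking start - logic_start =
 * 120K, len = 16K gives offset = 120K % 64K = 56K -> sector 14 and
 * nsectors = 4.  Since 14 + 4 > 16 the range wraps, so the first
 * bitmap_set() above covers sectors 14..15 and the second one covers
 * sectors 0..1.
 */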
static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If there is a checksum error, write via the repair
		 * mechanism in the dev replace case; otherwise write
		 * here in the dev replace case.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
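/*
 * Illustrative example (added note, not from the original source; the
 * values are assumptions): with a 4K sectorsize and a csum item
 * starting at bytenr = 1M with len = 32K, a lookup for logical =
 * 1M + 12K yields index = 12K / 4K = 3 and num_sectors = 8, so
 * csum_size bytes are copied from sum->sums + 3; the item itself is
 * only removed and freed once its last sector
 * (index == num_sectors - 1) has been consumed.
 */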
2536 /* scrub extent tries to collect up to 64 kB for each bio */
2537 static int scrub_extent(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
2538 u64 physical
, struct btrfs_device
*dev
, u64 flags
,
2539 u64 gen
, int mirror_num
, u64 physical_for_dev_replace
)
2542 u8 csum
[BTRFS_CSUM_SIZE
];
2545 if (flags
& BTRFS_EXTENT_FLAG_DATA
) {
2546 blocksize
= sctx
->sectorsize
;
2547 spin_lock(&sctx
->stat_lock
);
2548 sctx
->stat
.data_extents_scrubbed
++;
2549 sctx
->stat
.data_bytes_scrubbed
+= len
;
2550 spin_unlock(&sctx
->stat_lock
);
2551 } else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
2552 blocksize
= sctx
->nodesize
;
2553 spin_lock(&sctx
->stat_lock
);
2554 sctx
->stat
.tree_extents_scrubbed
++;
2555 sctx
->stat
.tree_bytes_scrubbed
+= len
;
2556 spin_unlock(&sctx
->stat_lock
);
2558 blocksize
= sctx
->sectorsize
;
2563 u64 l
= min_t(u64
, len
, blocksize
);
2566 if (flags
& BTRFS_EXTENT_FLAG_DATA
) {
2567 /* push csums to sbio */
2568 have_csum
= scrub_find_csum(sctx
, logical
, l
, csum
);
2570 ++sctx
->stat
.no_csum
;
2571 if (sctx
->is_dev_replace
&& !have_csum
) {
2572 ret
= copy_nocow_pages(sctx
, logical
, l
,
2574 physical_for_dev_replace
);
2575 goto behind_scrub_pages
;
2578 ret
= scrub_pages(sctx
, logical
, l
, physical
, dev
, flags
, gen
,
2579 mirror_num
, have_csum
? csum
: NULL
, 0,
2580 physical_for_dev_replace
);
2587 physical_for_dev_replace
+= l
;
2592 static int scrub_pages_for_parity(struct scrub_parity
*sparity
,
2593 u64 logical
, u64 len
,
2594 u64 physical
, struct btrfs_device
*dev
,
2595 u64 flags
, u64 gen
, int mirror_num
, u8
*csum
)
2597 struct scrub_ctx
*sctx
= sparity
->sctx
;
2598 struct scrub_block
*sblock
;
2601 sblock
= kzalloc(sizeof(*sblock
), GFP_NOFS
);
2603 spin_lock(&sctx
->stat_lock
);
2604 sctx
->stat
.malloc_errors
++;
2605 spin_unlock(&sctx
->stat_lock
);
2609 /* one ref inside this function, plus one for each page added to a bio later on */
2611 atomic_set(&sblock
->refs
, 1);
2612 sblock
->sctx
= sctx
;
2613 sblock
->no_io_error_seen
= 1;
2614 sblock
->sparity
= sparity
;
2615 scrub_parity_get(sparity
);
2617 for (index
= 0; len
> 0; index
++) {
2618 struct scrub_page
*spage
;
2619 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
2621 spage
= kzalloc(sizeof(*spage
), GFP_NOFS
);
2624 spin_lock(&sctx
->stat_lock
);
2625 sctx
->stat
.malloc_errors
++;
2626 spin_unlock(&sctx
->stat_lock
);
2627 scrub_block_put(sblock
);
2630 BUG_ON(index
>= SCRUB_MAX_PAGES_PER_BLOCK
);
2631 /* For scrub block */
2632 scrub_page_get(spage
);
2633 sblock
->pagev
[index
] = spage
;
2634 /* For scrub parity */
2635 scrub_page_get(spage
);
2636 list_add_tail(&spage
->list
, &sparity
->spages
);
2637 spage
->sblock
= sblock
;
2639 spage
->flags
= flags
;
2640 spage
->generation
= gen
;
2641 spage
->logical
= logical
;
2642 spage
->physical
= physical
;
2643 spage
->mirror_num
= mirror_num
;
2645 spage
->have_csum
= 1;
2646 memcpy(spage
->csum
, csum
, sctx
->csum_size
);
2648 spage
->have_csum
= 0;
2650 sblock
->page_count
++;
2651 spage
->page
= alloc_page(GFP_NOFS
);
2659 WARN_ON(sblock
->page_count
== 0);
2660 for (index
= 0; index
< sblock
->page_count
; index
++) {
2661 struct scrub_page
*spage
= sblock
->pagev
[index
];
2664 ret
= scrub_add_page_to_rd_bio(sctx
, spage
);
2666 scrub_block_put(sblock
);
2671 /* last one frees, either here or in bio completion for last page */
2672 scrub_block_put(sblock
);
2676 static int scrub_extent_for_parity(struct scrub_parity
*sparity
,
2677 u64 logical
, u64 len
,
2678 u64 physical
, struct btrfs_device
*dev
,
2679 u64 flags
, u64 gen
, int mirror_num
)
2681 struct scrub_ctx
*sctx
= sparity
->sctx
;
2683 u8 csum
[BTRFS_CSUM_SIZE
];
2687 scrub_parity_mark_sectors_error(sparity
, logical
, len
);
2691 if (flags
& BTRFS_EXTENT_FLAG_DATA
) {
2692 blocksize
= sctx
->sectorsize
;
2693 } else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
2694 blocksize
= sctx
->nodesize
;
2696 blocksize
= sctx
->sectorsize
;
2701 u64 l
= min_t(u64
, len
, blocksize
);
2704 if (flags
& BTRFS_EXTENT_FLAG_DATA
) {
2705 /* push csums to sbio */
2706 have_csum
= scrub_find_csum(sctx
, logical
, l
, csum
);
2710 ret
= scrub_pages_for_parity(sparity
, logical
, l
, physical
, dev
,
2711 flags
, gen
, mirror_num
,
2712 have_csum
? csum
: NULL
);
/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the leftmost data
 * stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}
static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;

	if (bio->bi_error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
			 &sparity->work);
}
2815 static void scrub_parity_check_and_repair(struct scrub_parity
*sparity
)
2817 struct scrub_ctx
*sctx
= sparity
->sctx
;
2819 struct btrfs_raid_bio
*rbio
;
2820 struct scrub_page
*spage
;
2821 struct btrfs_bio
*bbio
= NULL
;
2825 if (!bitmap_andnot(sparity
->dbitmap
, sparity
->dbitmap
, sparity
->ebitmap
,
2829 length
= sparity
->logic_end
- sparity
->logic_start
;
2830 ret
= btrfs_map_sblock(sctx
->dev_root
->fs_info
, WRITE
,
2831 sparity
->logic_start
,
2832 &length
, &bbio
, 0, 1);
2833 if (ret
|| !bbio
|| !bbio
->raid_map
)
2836 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 0);
2840 bio
->bi_iter
.bi_sector
= sparity
->logic_start
>> 9;
2841 bio
->bi_private
= sparity
;
2842 bio
->bi_end_io
= scrub_parity_bio_endio
;
2844 rbio
= raid56_parity_alloc_scrub_rbio(sctx
->dev_root
, bio
, bbio
,
2845 length
, sparity
->scrub_dev
,
2851 list_for_each_entry(spage
, &sparity
->spages
, list
)
2852 raid56_add_scrub_pages(rbio
, spage
->page
, spage
->logical
);
2854 scrub_pending_bio_inc(sctx
);
2855 raid56_parity_submit_scrub_rbio(rbio
);
2861 btrfs_put_bbio(bbio
);
2862 bitmap_or(sparity
->ebitmap
, sparity
->ebitmap
, sparity
->dbitmap
,
2864 spin_lock(&sctx
->stat_lock
);
2865 sctx
->stat
.malloc_errors
++;
2866 spin_unlock(&sctx
->stat_lock
);
2868 scrub_free_parity(sparity
);
static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
}
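/*
 * Illustrative arithmetic (added note, not from the original source;
 * the values are assumptions): with stripe_len = 64K and a 4K
 * sectorsize, nsectors = 16.  On a 64-bit host (BITS_PER_LONG == 64)
 * this works out to DIV_ROUND_UP(16, 64) * 8 = 8 bytes per bitmap,
 * i.e. a single unsigned long each for dbitmap and ebitmap.
 */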
static void scrub_parity_get(struct scrub_parity *sparity)
{
	atomic_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!atomic_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}
2889 static noinline_for_stack
int scrub_raid56_parity(struct scrub_ctx
*sctx
,
2890 struct map_lookup
*map
,
2891 struct btrfs_device
*sdev
,
2892 struct btrfs_path
*path
,
2896 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
2897 struct btrfs_root
*root
= fs_info
->extent_root
;
2898 struct btrfs_root
*csum_root
= fs_info
->csum_root
;
2899 struct btrfs_extent_item
*extent
;
2900 struct btrfs_bio
*bbio
= NULL
;
2904 struct extent_buffer
*l
;
2905 struct btrfs_key key
;
2908 u64 extent_physical
;
2911 struct btrfs_device
*extent_dev
;
2912 struct scrub_parity
*sparity
;
2915 int extent_mirror_num
;
2918 nsectors
= map
->stripe_len
/ root
->sectorsize
;
2919 bitmap_len
= scrub_calc_parity_bitmap_len(nsectors
);
2920 sparity
= kzalloc(sizeof(struct scrub_parity
) + 2 * bitmap_len
,
2923 spin_lock(&sctx
->stat_lock
);
2924 sctx
->stat
.malloc_errors
++;
2925 spin_unlock(&sctx
->stat_lock
);
2929 sparity
->stripe_len
= map
->stripe_len
;
2930 sparity
->nsectors
= nsectors
;
2931 sparity
->sctx
= sctx
;
2932 sparity
->scrub_dev
= sdev
;
2933 sparity
->logic_start
= logic_start
;
2934 sparity
->logic_end
= logic_end
;
2935 atomic_set(&sparity
->refs
, 1);
2936 INIT_LIST_HEAD(&sparity
->spages
);
2937 sparity
->dbitmap
= sparity
->bitmap
;
2938 sparity
->ebitmap
= (void *)sparity
->bitmap
+ bitmap_len
;
2941 while (logic_start
< logic_end
) {
2942 if (btrfs_fs_incompat(fs_info
, SKINNY_METADATA
))
2943 key
.type
= BTRFS_METADATA_ITEM_KEY
;
2945 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
2946 key
.objectid
= logic_start
;
2947 key
.offset
= (u64
)-1;
2949 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2954 ret
= btrfs_previous_extent_item(root
, path
, 0);
2958 btrfs_release_path(path
);
2959 ret
= btrfs_search_slot(NULL
, root
, &key
,
2971 slot
= path
->slots
[0];
2972 if (slot
>= btrfs_header_nritems(l
)) {
2973 ret
= btrfs_next_leaf(root
, path
);
2982 btrfs_item_key_to_cpu(l
, &key
, slot
);
2984 if (key
.type
!= BTRFS_EXTENT_ITEM_KEY
&&
2985 key
.type
!= BTRFS_METADATA_ITEM_KEY
)
2988 if (key
.type
== BTRFS_METADATA_ITEM_KEY
)
2989 bytes
= root
->nodesize
;
2993 if (key
.objectid
+ bytes
<= logic_start
)
2996 if (key
.objectid
>= logic_end
) {
3001 while (key
.objectid
>= logic_start
+ map
->stripe_len
)
3002 logic_start
+= map
->stripe_len
;
3004 extent
= btrfs_item_ptr(l
, slot
,
3005 struct btrfs_extent_item
);
3006 flags
= btrfs_extent_flags(l
, extent
);
3007 generation
= btrfs_extent_generation(l
, extent
);
3009 if ((flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) &&
3010 (key
.objectid
< logic_start
||
3011 key
.objectid
+ bytes
>
3012 logic_start
+ map
->stripe_len
)) {
3013 btrfs_err(fs_info
, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3014 key
.objectid
, logic_start
);
3018 extent_logical
= key
.objectid
;
3021 if (extent_logical
< logic_start
) {
3022 extent_len
-= logic_start
- extent_logical
;
3023 extent_logical
= logic_start
;
3026 if (extent_logical
+ extent_len
>
3027 logic_start
+ map
->stripe_len
)
3028 extent_len
= logic_start
+ map
->stripe_len
-
3031 scrub_parity_mark_sectors_data(sparity
, extent_logical
,
3034 mapped_length
= extent_len
;
3035 ret
= btrfs_map_block(fs_info
, READ
, extent_logical
,
3036 &mapped_length
, &bbio
, 0);
3038 if (!bbio
|| mapped_length
< extent_len
)
3042 btrfs_put_bbio(bbio
);
3045 extent_physical
= bbio
->stripes
[0].physical
;
3046 extent_mirror_num
= bbio
->mirror_num
;
3047 extent_dev
= bbio
->stripes
[0].dev
;
3048 btrfs_put_bbio(bbio
);
3050 ret
= btrfs_lookup_csums_range(csum_root
,
3052 extent_logical
+ extent_len
- 1,
3053 &sctx
->csum_list
, 1);
3057 ret
= scrub_extent_for_parity(sparity
, extent_logical
,
3064 scrub_free_csums(sctx
);
3069 if (extent_logical
+ extent_len
<
3070 key
.objectid
+ bytes
) {
3071 logic_start
+= map
->stripe_len
;
3073 if (logic_start
>= logic_end
) {
3078 if (logic_start
< key
.objectid
+ bytes
) {
3087 btrfs_release_path(path
);
3092 logic_start
+= map
->stripe_len
;
3096 scrub_parity_mark_sectors_error(sparity
, logic_start
,
3097 logic_end
- logic_start
);
3098 scrub_parity_put(sparity
);
3100 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
3101 scrub_wr_submit(sctx
);
3102 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
3104 btrfs_release_path(path
);
3105 return ret
< 0 ? ret
: 0;
3108 static noinline_for_stack
int scrub_stripe(struct scrub_ctx
*sctx
,
3109 struct map_lookup
*map
,
3110 struct btrfs_device
*scrub_dev
,
3111 int num
, u64 base
, u64 length
,
3114 struct btrfs_path
*path
, *ppath
;
3115 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
3116 struct btrfs_root
*root
= fs_info
->extent_root
;
3117 struct btrfs_root
*csum_root
= fs_info
->csum_root
;
3118 struct btrfs_extent_item
*extent
;
3119 struct blk_plug plug
;
3124 struct extent_buffer
*l
;
3125 struct btrfs_key key
;
3132 struct reada_control
*reada1
;
3133 struct reada_control
*reada2
;
3134 struct btrfs_key key_start
;
3135 struct btrfs_key key_end
;
3136 u64 increment
= map
->stripe_len
;
3139 u64 extent_physical
;
3143 struct btrfs_device
*extent_dev
;
3144 int extent_mirror_num
;
3147 physical
= map
->stripes
[num
].physical
;
3149 nstripes
= div_u64(length
, map
->stripe_len
);
3150 if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
) {
3151 offset
= map
->stripe_len
* num
;
3152 increment
= map
->stripe_len
* map
->num_stripes
;
3154 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
3155 int factor
= map
->num_stripes
/ map
->sub_stripes
;
3156 offset
= map
->stripe_len
* (num
/ map
->sub_stripes
);
3157 increment
= map
->stripe_len
* factor
;
3158 mirror_num
= num
% map
->sub_stripes
+ 1;
3159 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID1
) {
3160 increment
= map
->stripe_len
;
3161 mirror_num
= num
% map
->num_stripes
+ 1;
3162 } else if (map
->type
& BTRFS_BLOCK_GROUP_DUP
) {
3163 increment
= map
->stripe_len
;
3164 mirror_num
= num
% map
->num_stripes
+ 1;
3165 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
3166 get_raid56_logic_offset(physical
, num
, map
, &offset
, NULL
);
3167 increment
= map
->stripe_len
* nr_data_stripes(map
);
3170 increment
= map
->stripe_len
;
3174 path
= btrfs_alloc_path();
3178 ppath
= btrfs_alloc_path();
3180 btrfs_free_path(path
);
3185 * work on commit root. The related disk blocks are static as
3186 * long as COW is applied. This means it is safe to rewrite
3187 * them to repair disk errors without any race conditions
3189 path
->search_commit_root
= 1;
3190 path
->skip_locking
= 1;
3192 ppath
->search_commit_root
= 1;
3193 ppath
->skip_locking
= 1;
3195 * trigger the readahead for the extent tree and csum tree and wait for
3196 * completion. During readahead, the scrub is officially paused
3197 * to not hold off transaction commits
3199 logical
= base
+ offset
;
3200 physical_end
= physical
+ nstripes
* map
->stripe_len
;
3201 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
3202 get_raid56_logic_offset(physical_end
, num
,
3203 map
, &logic_end
, NULL
);
3206 logic_end
= logical
+ increment
* nstripes
;
3208 wait_event(sctx
->list_wait
,
3209 atomic_read(&sctx
->bios_in_flight
) == 0);
3210 scrub_blocked_if_needed(fs_info
);
3212 /* FIXME it might be better to start readahead at commit root */
3213 key_start
.objectid
= logical
;
3214 key_start
.type
= BTRFS_EXTENT_ITEM_KEY
;
3215 key_start
.offset
= (u64
)0;
3216 key_end
.objectid
= logic_end
;
3217 key_end
.type
= BTRFS_METADATA_ITEM_KEY
;
3218 key_end
.offset
= (u64
)-1;
3219 reada1
= btrfs_reada_add(root
, &key_start
, &key_end
);
3221 key_start
.objectid
= BTRFS_EXTENT_CSUM_OBJECTID
;
3222 key_start
.type
= BTRFS_EXTENT_CSUM_KEY
;
3223 key_start
.offset
= logical
;
3224 key_end
.objectid
= BTRFS_EXTENT_CSUM_OBJECTID
;
3225 key_end
.type
= BTRFS_EXTENT_CSUM_KEY
;
3226 key_end
.offset
= logic_end
;
3227 reada2
= btrfs_reada_add(csum_root
, &key_start
, &key_end
);
3229 if (!IS_ERR(reada1
))
3230 btrfs_reada_wait(reada1
);
3231 if (!IS_ERR(reada2
))
3232 btrfs_reada_wait(reada2
);
3236 * collect all data csums for the stripe to avoid seeking during
3237 * the scrub. This might currently (crc32) end up being about 1MB
3239 blk_start_plug(&plug
);
3242 * now find all extents for each stripe and scrub them
3245 while (physical
< physical_end
) {
3249 if (atomic_read(&fs_info
->scrub_cancel_req
) ||
3250 atomic_read(&sctx
->cancel_req
)) {
3255 * check to see if we have to pause
3257 if (atomic_read(&fs_info
->scrub_pause_req
)) {
3258 /* push queued extents */
3259 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 1);
3261 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
3262 scrub_wr_submit(sctx
);
3263 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
3264 wait_event(sctx
->list_wait
,
3265 atomic_read(&sctx
->bios_in_flight
) == 0);
3266 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 0);
3267 scrub_blocked_if_needed(fs_info
);
3270 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
3271 ret
= get_raid56_logic_offset(physical
, num
, map
,
3276 /* it is a parity stripe */
3277 stripe_logical
+= base
;
3278 stripe_end
= stripe_logical
+ increment
;
3279 ret
= scrub_raid56_parity(sctx
, map
, scrub_dev
,
3280 ppath
, stripe_logical
,
3288 if (btrfs_fs_incompat(fs_info
, SKINNY_METADATA
))
3289 key
.type
= BTRFS_METADATA_ITEM_KEY
;
3291 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
3292 key
.objectid
= logical
;
3293 key
.offset
= (u64
)-1;
3295 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3300 ret
= btrfs_previous_extent_item(root
, path
, 0);
3304 /* there's no smaller item, so stick with the larger one */
3306 btrfs_release_path(path
);
3307 ret
= btrfs_search_slot(NULL
, root
, &key
,
3319 slot
= path
->slots
[0];
3320 if (slot
>= btrfs_header_nritems(l
)) {
3321 ret
= btrfs_next_leaf(root
, path
);
3330 btrfs_item_key_to_cpu(l
, &key
, slot
);
3332 if (key
.type
!= BTRFS_EXTENT_ITEM_KEY
&&
3333 key
.type
!= BTRFS_METADATA_ITEM_KEY
)
3336 if (key
.type
== BTRFS_METADATA_ITEM_KEY
)
3337 bytes
= root
->nodesize
;
3341 if (key
.objectid
+ bytes
<= logical
)
3344 if (key
.objectid
>= logical
+ map
->stripe_len
) {
3345 /* out of this device extent */
3346 if (key
.objectid
>= logic_end
)
3351 extent
= btrfs_item_ptr(l
, slot
,
3352 struct btrfs_extent_item
);
3353 flags
= btrfs_extent_flags(l
, extent
);
3354 generation
= btrfs_extent_generation(l
, extent
);
3356 if ((flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) &&
3357 (key
.objectid
< logical
||
3358 key
.objectid
+ bytes
>
3359 logical
+ map
->stripe_len
)) {
3361 "scrub: tree block %llu spanning "
3362 "stripes, ignored. logical=%llu",
3363 key
.objectid
, logical
);
3368 extent_logical
= key
.objectid
;
3372 * trim extent to this stripe
3374 if (extent_logical
< logical
) {
3375 extent_len
-= logical
- extent_logical
;
3376 extent_logical
= logical
;
3378 if (extent_logical
+ extent_len
>
3379 logical
+ map
->stripe_len
) {
3380 extent_len
= logical
+ map
->stripe_len
-
3384 extent_physical
= extent_logical
- logical
+ physical
;
3385 extent_dev
= scrub_dev
;
3386 extent_mirror_num
= mirror_num
;
3388 scrub_remap_extent(fs_info
, extent_logical
,
3389 extent_len
, &extent_physical
,
3391 &extent_mirror_num
);
3393 ret
= btrfs_lookup_csums_range(csum_root
,
3397 &sctx
->csum_list
, 1);
3401 ret
= scrub_extent(sctx
, extent_logical
, extent_len
,
3402 extent_physical
, extent_dev
, flags
,
3403 generation
, extent_mirror_num
,
3404 extent_logical
- logical
+ physical
);
3406 scrub_free_csums(sctx
);
3411 if (extent_logical
+ extent_len
<
3412 key
.objectid
+ bytes
) {
3413 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
3415 * loop until we find next data stripe
3416 * or we have finished all stripes.
3419 physical
+= map
->stripe_len
;
3420 ret
= get_raid56_logic_offset(physical
,
3425 if (ret
&& physical
< physical_end
) {
3426 stripe_logical
+= base
;
3427 stripe_end
= stripe_logical
+
3429 ret
= scrub_raid56_parity(sctx
,
3430 map
, scrub_dev
, ppath
,
3438 physical
+= map
->stripe_len
;
3439 logical
+= increment
;
3441 if (logical
< key
.objectid
+ bytes
) {
3446 if (physical
>= physical_end
) {
3454 btrfs_release_path(path
);
3456 logical
+= increment
;
3457 physical
+= map
->stripe_len
;
3458 spin_lock(&sctx
->stat_lock
);
3460 sctx
->stat
.last_physical
= map
->stripes
[num
].physical
+
3463 sctx
->stat
.last_physical
= physical
;
3464 spin_unlock(&sctx
->stat_lock
);
3469 /* push queued extents */
3471 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
3472 scrub_wr_submit(sctx
);
3473 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
3475 blk_finish_plug(&plug
);
3476 btrfs_free_path(path
);
3477 btrfs_free_path(ppath
);
3478 return ret
< 0 ? ret
: 0;
3481 static noinline_for_stack
int scrub_chunk(struct scrub_ctx
*sctx
,
3482 struct btrfs_device
*scrub_dev
,
3483 u64 chunk_offset
, u64 length
,
3484 u64 dev_offset
, int is_dev_replace
)
3486 struct btrfs_mapping_tree
*map_tree
=
3487 &sctx
->dev_root
->fs_info
->mapping_tree
;
3488 struct map_lookup
*map
;
3489 struct extent_map
*em
;
3493 read_lock(&map_tree
->map_tree
.lock
);
3494 em
= lookup_extent_mapping(&map_tree
->map_tree
, chunk_offset
, 1);
3495 read_unlock(&map_tree
->map_tree
.lock
);
3500 map
= (struct map_lookup
*)em
->bdev
;
3501 if (em
->start
!= chunk_offset
)
3504 if (em
->len
< length
)
3507 for (i
= 0; i
< map
->num_stripes
; ++i
) {
3508 if (map
->stripes
[i
].dev
->bdev
== scrub_dev
->bdev
&&
3509 map
->stripes
[i
].physical
== dev_offset
) {
3510 ret
= scrub_stripe(sctx
, map
, scrub_dev
, i
,
3511 chunk_offset
, length
,
3518 free_extent_map(em
);
3523 static noinline_for_stack
3524 int scrub_enumerate_chunks(struct scrub_ctx
*sctx
,
3525 struct btrfs_device
*scrub_dev
, u64 start
, u64 end
,
3528 struct btrfs_dev_extent
*dev_extent
= NULL
;
3529 struct btrfs_path
*path
;
3530 struct btrfs_root
*root
= sctx
->dev_root
;
3531 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3536 struct extent_buffer
*l
;
3537 struct btrfs_key key
;
3538 struct btrfs_key found_key
;
3539 struct btrfs_block_group_cache
*cache
;
3540 struct btrfs_dev_replace
*dev_replace
= &fs_info
->dev_replace
;
3542 path
= btrfs_alloc_path();
3547 path
->search_commit_root
= 1;
3548 path
->skip_locking
= 1;
3550 key
.objectid
= scrub_dev
->devid
;
3552 key
.type
= BTRFS_DEV_EXTENT_KEY
;
3555 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3559 if (path
->slots
[0] >=
3560 btrfs_header_nritems(path
->nodes
[0])) {
3561 ret
= btrfs_next_leaf(root
, path
);
3574 slot
= path
->slots
[0];
3576 btrfs_item_key_to_cpu(l
, &found_key
, slot
);
3578 if (found_key
.objectid
!= scrub_dev
->devid
)
3581 if (found_key
.type
!= BTRFS_DEV_EXTENT_KEY
)
3584 if (found_key
.offset
>= end
)
3587 if (found_key
.offset
< key
.offset
)
3590 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
3591 length
= btrfs_dev_extent_length(l
, dev_extent
);
3593 if (found_key
.offset
+ length
<= start
)
3596 chunk_offset
= btrfs_dev_extent_chunk_offset(l
, dev_extent
);
3599 * get a reference on the corresponding block group to prevent
3600 * the chunk from going away while we scrub it
3602 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3604 /* some chunks are removed but not committed to disk yet,
3605 * continue scrubbing */
3610 * we need call btrfs_inc_block_group_ro() with scrubs_paused,
3611 * to avoid deadlock caused by:
3612 * btrfs_inc_block_group_ro()
3613 * -> btrfs_wait_for_commit()
3614 * -> btrfs_commit_transaction()
3615 * -> btrfs_scrub_pause()
3617 scrub_pause_on(fs_info
);
3618 ret
= btrfs_inc_block_group_ro(root
, cache
);
3619 scrub_pause_off(fs_info
);
3621 btrfs_put_block_group(cache
);
3625 dev_replace
->cursor_right
= found_key
.offset
+ length
;
3626 dev_replace
->cursor_left
= found_key
.offset
;
3627 dev_replace
->item_needs_writeback
= 1;
3628 ret
= scrub_chunk(sctx
, scrub_dev
, chunk_offset
, length
,
3629 found_key
.offset
, is_dev_replace
);
3632 * flush, submit all pending read and write bios, afterwards wait for them.
3634 * Note that in the dev replace case, a read request causes
3635 * write requests that are submitted in the read completion
3636 * worker. Therefore in the current situation, it is required
3637 * that all write requests are flushed, so that all read and
3638 * write requests are really completed when bios_in_flight changes to 0.
3641 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 1);
3643 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
3644 scrub_wr_submit(sctx
);
3645 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
3647 wait_event(sctx
->list_wait
,
3648 atomic_read(&sctx
->bios_in_flight
) == 0);
3650 scrub_pause_on(fs_info
);
3653 * must be called before we decrease @scrub_paused.
3654 * make sure we don't block transaction commit while
3655 * we are waiting for pending workers to finish.
3657 wait_event(sctx
->list_wait
,
3658 atomic_read(&sctx
->workers_pending
) == 0);
3659 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 0);
3661 scrub_pause_off(fs_info
);
3663 btrfs_dec_block_group_ro(root
, cache
);
3665 btrfs_put_block_group(cache
);
3668 if (is_dev_replace
&&
3669 atomic64_read(&dev_replace
->num_write_errors
) > 0) {
3673 if (sctx
->stat
.malloc_errors
> 0) {
3678 dev_replace
->cursor_left
= dev_replace
->cursor_right
;
3679 dev_replace
->item_needs_writeback
= 1;
3681 key
.offset
= found_key
.offset
+ length
;
3682 btrfs_release_path(path
);
3685 btrfs_free_path(path
);
3690 static noinline_for_stack
int scrub_supers(struct scrub_ctx
*sctx
,
3691 struct btrfs_device
*scrub_dev
)
3697 struct btrfs_root
*root
= sctx
->dev_root
;
3699 if (test_bit(BTRFS_FS_STATE_ERROR
, &root
->fs_info
->fs_state
))
3702 /* Seed devices of a new filesystem have their own generation. */
3703 if (scrub_dev
->fs_devices
!= root
->fs_info
->fs_devices
)
3704 gen
= scrub_dev
->generation
;
3706 gen
= root
->fs_info
->last_trans_committed
;
3708 for (i
= 0; i
< BTRFS_SUPER_MIRROR_MAX
; i
++) {
3709 bytenr
= btrfs_sb_offset(i
);
3710 if (bytenr
+ BTRFS_SUPER_INFO_SIZE
>
3711 scrub_dev
->commit_total_bytes
)
3714 ret
= scrub_pages(sctx
, bytenr
, BTRFS_SUPER_INFO_SIZE
, bytenr
,
3715 scrub_dev
, BTRFS_EXTENT_FLAG_SUPER
, gen
, i
,
3720 wait_event(sctx
->list_wait
, atomic_read(&sctx
->bios_in_flight
) == 0);
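/*
 * For reference (added note, not from the original source):
 * btrfs_sb_offset(i) returns the fixed superblock mirror locations,
 * 64KiB for mirror 0, 64MiB for mirror 1 and 256GiB for mirror 2, so
 * any mirror that would lie beyond commit_total_bytes of the scrubbed
 * device is simply skipped by the size check above.
 */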
3726 * get a reference count on fs_info->scrub_workers. Start workers if necessary.
3728 static noinline_for_stack
int scrub_workers_get(struct btrfs_fs_info
*fs_info
,
3731 unsigned int flags
= WQ_FREEZABLE
| WQ_UNBOUND
;
3732 int max_active
= fs_info
->thread_pool_size
;
3734 if (fs_info
->scrub_workers_refcnt
== 0) {
3736 fs_info
->scrub_workers
=
3737 btrfs_alloc_workqueue("btrfs-scrub", flags
,
3740 fs_info
->scrub_workers
=
3741 btrfs_alloc_workqueue("btrfs-scrub", flags
,
3743 if (!fs_info
->scrub_workers
)
3744 goto fail_scrub_workers
;
3746 fs_info
->scrub_wr_completion_workers
=
3747 btrfs_alloc_workqueue("btrfs-scrubwrc", flags
,
3749 if (!fs_info
->scrub_wr_completion_workers
)
3750 goto fail_scrub_wr_completion_workers
;
3752 fs_info
->scrub_nocow_workers
=
3753 btrfs_alloc_workqueue("btrfs-scrubnc", flags
, 1, 0);
3754 if (!fs_info
->scrub_nocow_workers
)
3755 goto fail_scrub_nocow_workers
;
3756 fs_info
->scrub_parity_workers
=
3757 btrfs_alloc_workqueue("btrfs-scrubparity", flags
,
3759 if (!fs_info
->scrub_parity_workers
)
3760 goto fail_scrub_parity_workers
;
3762 ++fs_info
->scrub_workers_refcnt
;
3765 fail_scrub_parity_workers
:
3766 btrfs_destroy_workqueue(fs_info
->scrub_nocow_workers
);
3767 fail_scrub_nocow_workers
:
3768 btrfs_destroy_workqueue(fs_info
->scrub_wr_completion_workers
);
3769 fail_scrub_wr_completion_workers
:
3770 btrfs_destroy_workqueue(fs_info
->scrub_workers
);
3775 static noinline_for_stack
void scrub_workers_put(struct btrfs_fs_info
*fs_info
)
3777 if (--fs_info
->scrub_workers_refcnt
== 0) {
3778 btrfs_destroy_workqueue(fs_info
->scrub_workers
);
3779 btrfs_destroy_workqueue(fs_info
->scrub_wr_completion_workers
);
3780 btrfs_destroy_workqueue(fs_info
->scrub_nocow_workers
);
3781 btrfs_destroy_workqueue(fs_info
->scrub_parity_workers
);
3783 WARN_ON(fs_info
->scrub_workers_refcnt
< 0);
3786 int btrfs_scrub_dev(struct btrfs_fs_info
*fs_info
, u64 devid
, u64 start
,
3787 u64 end
, struct btrfs_scrub_progress
*progress
,
3788 int readonly
, int is_dev_replace
)
3790 struct scrub_ctx
*sctx
;
3792 struct btrfs_device
*dev
;
3793 struct rcu_string
*name
;
3795 if (btrfs_fs_closing(fs_info
))
3798 if (fs_info
->chunk_root
->nodesize
> BTRFS_STRIPE_LEN
) {
3800 * in this case scrub is unable to calculate the checksum
3801 * the way scrub is implemented. Do not handle this
3802 * situation at all because it won't ever happen.
3805 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3806 fs_info
->chunk_root
->nodesize
, BTRFS_STRIPE_LEN
);
3810 if (fs_info
->chunk_root
->sectorsize
!= PAGE_SIZE
) {
3811 /* not supported for data w/o checksums */
3813 "scrub: size assumption sectorsize != PAGE_SIZE "
3814 "(%d != %lu) fails",
3815 fs_info
->chunk_root
->sectorsize
, PAGE_SIZE
);
3819 if (fs_info
->chunk_root
->nodesize
>
3820 PAGE_SIZE
* SCRUB_MAX_PAGES_PER_BLOCK
||
3821 fs_info
->chunk_root
->sectorsize
>
3822 PAGE_SIZE
* SCRUB_MAX_PAGES_PER_BLOCK
) {
3824 * would exhaust the array bounds of pagev member in
3825 * struct scrub_block
3827 btrfs_err(fs_info
, "scrub: size assumption nodesize and sectorsize "
3828 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3829 fs_info
->chunk_root
->nodesize
,
3830 SCRUB_MAX_PAGES_PER_BLOCK
,
3831 fs_info
->chunk_root
->sectorsize
,
3832 SCRUB_MAX_PAGES_PER_BLOCK
);
3837 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
3838 dev
= btrfs_find_device(fs_info
, devid
, NULL
, NULL
);
3839 if (!dev
|| (dev
->missing
&& !is_dev_replace
)) {
3840 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3844 if (!is_dev_replace
&& !readonly
&& !dev
->writeable
) {
3845 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3847 name
= rcu_dereference(dev
->name
);
3848 btrfs_err(fs_info
, "scrub: device %s is not writable",
3854 mutex_lock(&fs_info
->scrub_lock
);
3855 if (!dev
->in_fs_metadata
|| dev
->is_tgtdev_for_dev_replace
) {
3856 mutex_unlock(&fs_info
->scrub_lock
);
3857 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3861 btrfs_dev_replace_lock(&fs_info
->dev_replace
);
3862 if (dev
->scrub_device
||
3864 btrfs_dev_replace_is_ongoing(&fs_info
->dev_replace
))) {
3865 btrfs_dev_replace_unlock(&fs_info
->dev_replace
);
3866 mutex_unlock(&fs_info
->scrub_lock
);
3867 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3868 return -EINPROGRESS
;
3870 btrfs_dev_replace_unlock(&fs_info
->dev_replace
);
3872 ret
= scrub_workers_get(fs_info
, is_dev_replace
);
3874 mutex_unlock(&fs_info
->scrub_lock
);
3875 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3879 sctx
= scrub_setup_ctx(dev
, is_dev_replace
);
3881 mutex_unlock(&fs_info
->scrub_lock
);
3882 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3883 scrub_workers_put(fs_info
);
3884 return PTR_ERR(sctx
);
3886 sctx
->readonly
= readonly
;
3887 dev
->scrub_device
= sctx
;
3888 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3891 * checking @scrub_pause_req here, we can avoid
3892 * a race between committing a transaction and scrubbing.
3894 __scrub_blocked_if_needed(fs_info
);
3895 atomic_inc(&fs_info
->scrubs_running
);
3896 mutex_unlock(&fs_info
->scrub_lock
);
3898 if (!is_dev_replace
) {
3900 * by holding device list mutex, we can
3901 * kick off writing super in log tree sync.
3903 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
3904 ret
= scrub_supers(sctx
, dev
);
3905 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3909 ret
= scrub_enumerate_chunks(sctx
, dev
, start
, end
,
3912 wait_event(sctx
->list_wait
, atomic_read(&sctx
->bios_in_flight
) == 0);
3913 atomic_dec(&fs_info
->scrubs_running
);
3914 wake_up(&fs_info
->scrub_pause_wait
);
3916 wait_event(sctx
->list_wait
, atomic_read(&sctx
->workers_pending
) == 0);
3919 memcpy(progress
, &sctx
->stat
, sizeof(*progress
));
3921 mutex_lock(&fs_info
->scrub_lock
);
3922 dev
->scrub_device
= NULL
;
3923 scrub_workers_put(fs_info
);
3924 mutex_unlock(&fs_info
->scrub_lock
);
3926 scrub_put_ctx(sctx
);
3931 void btrfs_scrub_pause(struct btrfs_root
*root
)
3933 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3935 mutex_lock(&fs_info
->scrub_lock
);
3936 atomic_inc(&fs_info
->scrub_pause_req
);
3937 while (atomic_read(&fs_info
->scrubs_paused
) !=
3938 atomic_read(&fs_info
->scrubs_running
)) {
3939 mutex_unlock(&fs_info
->scrub_lock
);
3940 wait_event(fs_info
->scrub_pause_wait
,
3941 atomic_read(&fs_info
->scrubs_paused
) ==
3942 atomic_read(&fs_info
->scrubs_running
));
3943 mutex_lock(&fs_info
->scrub_lock
);
3945 mutex_unlock(&fs_info
->scrub_lock
);
3948 void btrfs_scrub_continue(struct btrfs_root
*root
)
3950 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3952 atomic_dec(&fs_info
->scrub_pause_req
);
3953 wake_up(&fs_info
->scrub_pause_wait
);
3956 int btrfs_scrub_cancel(struct btrfs_fs_info
*fs_info
)
3958 mutex_lock(&fs_info
->scrub_lock
);
3959 if (!atomic_read(&fs_info
->scrubs_running
)) {
3960 mutex_unlock(&fs_info
->scrub_lock
);
3964 atomic_inc(&fs_info
->scrub_cancel_req
);
3965 while (atomic_read(&fs_info
->scrubs_running
)) {
3966 mutex_unlock(&fs_info
->scrub_lock
);
3967 wait_event(fs_info
->scrub_pause_wait
,
3968 atomic_read(&fs_info
->scrubs_running
) == 0);
3969 mutex_lock(&fs_info
->scrub_lock
);
3971 atomic_dec(&fs_info
->scrub_cancel_req
);
3972 mutex_unlock(&fs_info
->scrub_lock
);
3977 int btrfs_scrub_cancel_dev(struct btrfs_fs_info
*fs_info
,
3978 struct btrfs_device
*dev
)
3980 struct scrub_ctx
*sctx
;
3982 mutex_lock(&fs_info
->scrub_lock
);
3983 sctx
= dev
->scrub_device
;
3985 mutex_unlock(&fs_info
->scrub_lock
);
3988 atomic_inc(&sctx
->cancel_req
);
3989 while (dev
->scrub_device
) {
3990 mutex_unlock(&fs_info
->scrub_lock
);
3991 wait_event(fs_info
->scrub_pause_wait
,
3992 dev
->scrub_device
== NULL
);
3993 mutex_lock(&fs_info
->scrub_lock
);
3995 mutex_unlock(&fs_info
->scrub_lock
);
4000 int btrfs_scrub_progress(struct btrfs_root
*root
, u64 devid
,
4001 struct btrfs_scrub_progress
*progress
)
4003 struct btrfs_device
*dev
;
4004 struct scrub_ctx
*sctx
= NULL
;
4006 mutex_lock(&root
->fs_info
->fs_devices
->device_list_mutex
);
4007 dev
= btrfs_find_device(root
->fs_info
, devid
, NULL
, NULL
);
4009 sctx
= dev
->scrub_device
;
4011 memcpy(progress
, &sctx
->stat
, sizeof(*progress
));
4012 mutex_unlock(&root
->fs_info
->fs_devices
->device_list_mutex
);
4014 return dev
? (sctx
? 0 : -ENOTCONN
) : -ENODEV
;
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
4070 static int copy_nocow_pages(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
4071 int mirror_num
, u64 physical_for_dev_replace
)
4073 struct scrub_copy_nocow_ctx
*nocow_ctx
;
4074 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
4076 nocow_ctx
= kzalloc(sizeof(*nocow_ctx
), GFP_NOFS
);
4078 spin_lock(&sctx
->stat_lock
);
4079 sctx
->stat
.malloc_errors
++;
4080 spin_unlock(&sctx
->stat_lock
);
4084 scrub_pending_trans_workers_inc(sctx
);
4086 nocow_ctx
->sctx
= sctx
;
4087 nocow_ctx
->logical
= logical
;
4088 nocow_ctx
->len
= len
;
4089 nocow_ctx
->mirror_num
= mirror_num
;
4090 nocow_ctx
->physical_for_dev_replace
= physical_for_dev_replace
;
4091 btrfs_init_work(&nocow_ctx
->work
, btrfs_scrubnc_helper
,
4092 copy_nocow_pages_worker
, NULL
, NULL
);
4093 INIT_LIST_HEAD(&nocow_ctx
->inodes
);
4094 btrfs_queue_work(fs_info
->scrub_nocow_workers
,
4100 static int record_inode_for_nocow(u64 inum
, u64 offset
, u64 root
, void *ctx
)
4102 struct scrub_copy_nocow_ctx
*nocow_ctx
= ctx
;
4103 struct scrub_nocow_inode
*nocow_inode
;
4105 nocow_inode
= kzalloc(sizeof(*nocow_inode
), GFP_NOFS
);
4108 nocow_inode
->inum
= inum
;
4109 nocow_inode
->offset
= offset
;
4110 nocow_inode
->root
= root
;
4111 list_add_tail(&nocow_inode
->list
, &nocow_ctx
->inodes
);
4115 #define COPY_COMPLETE 1
4117 static void copy_nocow_pages_worker(struct btrfs_work
*work
)
4119 struct scrub_copy_nocow_ctx
*nocow_ctx
=
4120 container_of(work
, struct scrub_copy_nocow_ctx
, work
);
4121 struct scrub_ctx
*sctx
= nocow_ctx
->sctx
;
4122 u64 logical
= nocow_ctx
->logical
;
4123 u64 len
= nocow_ctx
->len
;
4124 int mirror_num
= nocow_ctx
->mirror_num
;
4125 u64 physical_for_dev_replace
= nocow_ctx
->physical_for_dev_replace
;
4127 struct btrfs_trans_handle
*trans
= NULL
;
4128 struct btrfs_fs_info
*fs_info
;
4129 struct btrfs_path
*path
;
4130 struct btrfs_root
*root
;
4131 int not_written
= 0;
4133 fs_info
= sctx
->dev_root
->fs_info
;
4134 root
= fs_info
->extent_root
;
4136 path
= btrfs_alloc_path();
4138 spin_lock(&sctx
->stat_lock
);
4139 sctx
->stat
.malloc_errors
++;
4140 spin_unlock(&sctx
->stat_lock
);
4145 trans
= btrfs_join_transaction(root
);
4146 if (IS_ERR(trans
)) {
4151 ret
= iterate_inodes_from_logical(logical
, fs_info
, path
,
4152 record_inode_for_nocow
, nocow_ctx
);
4153 if (ret
!= 0 && ret
!= -ENOENT
) {
4154 btrfs_warn(fs_info
, "iterate_inodes_from_logical() failed: log %llu, "
4155 "phys %llu, len %llu, mir %u, ret %d",
4156 logical
, physical_for_dev_replace
, len
, mirror_num
,
4162 btrfs_end_transaction(trans
, root
);
4164 while (!list_empty(&nocow_ctx
->inodes
)) {
4165 struct scrub_nocow_inode
*entry
;
4166 entry
= list_first_entry(&nocow_ctx
->inodes
,
4167 struct scrub_nocow_inode
,
4169 list_del_init(&entry
->list
);
4170 ret
= copy_nocow_pages_for_inode(entry
->inum
, entry
->offset
,
4171 entry
->root
, nocow_ctx
);
4173 if (ret
== COPY_COMPLETE
) {
4181 while (!list_empty(&nocow_ctx
->inodes
)) {
4182 struct scrub_nocow_inode
*entry
;
4183 entry
= list_first_entry(&nocow_ctx
->inodes
,
4184 struct scrub_nocow_inode
,
4186 list_del_init(&entry
->list
);
4189 if (trans
&& !IS_ERR(trans
))
4190 btrfs_end_transaction(trans
, root
);
4192 btrfs_dev_replace_stats_inc(&fs_info
->dev_replace
.
4193 num_uncorrectable_read_errors
);
4195 btrfs_free_path(path
);
4198 scrub_pending_trans_workers_dec(sctx
);
4201 static int check_extent_to_block(struct inode
*inode
, u64 start
, u64 len
,
4204 struct extent_state
*cached_state
= NULL
;
4205 struct btrfs_ordered_extent
*ordered
;
4206 struct extent_io_tree
*io_tree
;
4207 struct extent_map
*em
;
4208 u64 lockstart
= start
, lockend
= start
+ len
- 1;
4211 io_tree
= &BTRFS_I(inode
)->io_tree
;
4213 lock_extent_bits(io_tree
, lockstart
, lockend
, 0, &cached_state
);
4214 ordered
= btrfs_lookup_ordered_range(inode
, lockstart
, len
);
4216 btrfs_put_ordered_extent(ordered
);
4221 em
= btrfs_get_extent(inode
, NULL
, 0, start
, len
, 0);
4228 * This extent does not actually cover the logical extent anymore,
4229 * move on to the next inode.
4231 if (em
->block_start
> logical
||
4232 em
->block_start
+ em
->block_len
< logical
+ len
) {
4233 free_extent_map(em
);
4237 free_extent_map(em
);
4240 unlock_extent_cached(io_tree
, lockstart
, lockend
, &cached_state
,
4245 static int copy_nocow_pages_for_inode(u64 inum
, u64 offset
, u64 root
,
4246 struct scrub_copy_nocow_ctx
*nocow_ctx
)
4248 struct btrfs_fs_info
*fs_info
= nocow_ctx
->sctx
->dev_root
->fs_info
;
4249 struct btrfs_key key
;
4250 struct inode
*inode
;
4252 struct btrfs_root
*local_root
;
4253 struct extent_io_tree
*io_tree
;
4254 u64 physical_for_dev_replace
;
4255 u64 nocow_ctx_logical
;
4256 u64 len
= nocow_ctx
->len
;
4257 unsigned long index
;
4262 key
.objectid
= root
;
4263 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4264 key
.offset
= (u64
)-1;
4266 srcu_index
= srcu_read_lock(&fs_info
->subvol_srcu
);
4268 local_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
4269 if (IS_ERR(local_root
)) {
4270 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
4271 return PTR_ERR(local_root
);
4274 key
.type
= BTRFS_INODE_ITEM_KEY
;
4275 key
.objectid
= inum
;
4277 inode
= btrfs_iget(fs_info
->sb
, &key
, local_root
, NULL
);
4278 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
4280 return PTR_ERR(inode
);
4282 /* Avoid truncate/dio/punch hole.. */
4283 mutex_lock(&inode
->i_mutex
);
4284 inode_dio_wait(inode
);
4286 physical_for_dev_replace
= nocow_ctx
->physical_for_dev_replace
;
4287 io_tree
= &BTRFS_I(inode
)->io_tree
;
4288 nocow_ctx_logical
= nocow_ctx
->logical
;
4290 ret
= check_extent_to_block(inode
, offset
, len
, nocow_ctx_logical
);
4292 ret
= ret
> 0 ? 0 : ret
;
4296 while (len
>= PAGE_CACHE_SIZE
) {
4297 index
= offset
>> PAGE_CACHE_SHIFT
;
4299 page
= find_or_create_page(inode
->i_mapping
, index
, GFP_NOFS
);
4301 btrfs_err(fs_info
, "find_or_create_page() failed");
4306 if (PageUptodate(page
)) {
4307 if (PageDirty(page
))
4310 ClearPageError(page
);
4311 err
= extent_read_full_page(io_tree
, page
,
4313 nocow_ctx
->mirror_num
);
4321 * If the page has been removed from the page cache,
4322 * the data on it is meaningless, because it may be
4323 * the old one; the new data may be written into the new
4324 * page in the page cache.
4326 if (page
->mapping
!= inode
->i_mapping
) {
4328 page_cache_release(page
);
4331 if (!PageUptodate(page
)) {
4337 ret
= check_extent_to_block(inode
, offset
, len
,
4340 ret
= ret
> 0 ? 0 : ret
;
4344 err
= write_page_nocow(nocow_ctx
->sctx
,
4345 physical_for_dev_replace
, page
);
4350 page_cache_release(page
);
4355 offset
+= PAGE_CACHE_SIZE
;
4356 physical_for_dev_replace
+= PAGE_CACHE_SIZE
;
4357 nocow_ctx_logical
+= PAGE_CACHE_SIZE
;
4358 len
-= PAGE_CACHE_SIZE
;
4360 ret
= COPY_COMPLETE
;
4362 mutex_unlock(&inode
->i_mutex
);
4367 static int write_page_nocow(struct scrub_ctx
*sctx
,
4368 u64 physical_for_dev_replace
, struct page
*page
)
4371 struct btrfs_device
*dev
;
4374 dev
= sctx
->wr_ctx
.tgtdev
;
4378 printk_ratelimited(KERN_WARNING
4379 "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
4382 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
4384 spin_lock(&sctx
->stat_lock
);
4385 sctx
->stat
.malloc_errors
++;
4386 spin_unlock(&sctx
->stat_lock
);
4389 bio
->bi_iter
.bi_size
= 0;
4390 bio
->bi_iter
.bi_sector
= physical_for_dev_replace
>> 9;
4391 bio
->bi_bdev
= dev
->bdev
;
4392 ret
= bio_add_page(bio
, page
, PAGE_CACHE_SIZE
, 0);
4393 if (ret
!= PAGE_CACHE_SIZE
) {
4396 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_WRITE_ERRS
);
4400 if (btrfsic_submit_bio_wait(WRITE_SYNC
, bio
))
4401 goto leave_with_eio
;