/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extent and super block mirrors and verifies their checksums. In case a bad
 * checksum is found or the extent cannot be read, good data will be written
 * back if any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
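
/*
 * worked example for the limits above, assuming 4k pages: one bio carries
 * up to 32 pages (32 * 4k = 128k), and with 64 bios per scrub context a
 * device has at most 64 * 128k = 8MB of read I/O in flight, matching the
 * figures in the define comments.
 */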

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};
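
/*
 * note on ownership: a scrub_ctx owns the fixed array of scrub_bios and
 * recycles them through the first_free/next_free index chain, while
 * scrub_blocks and scrub_pages are individually refcounted (ref_count)
 * and freed by the last scrub_block_put()/scrub_page_put(), whichever
 * context drops the final reference.
 */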

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
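
/*
 * bios_in_flight is incremented once per submitted bio and decremented in
 * the bio's end_io worker; waiters on list_wait use it to wait until all
 * outstanding read I/O of this scrub context has drained.
 */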

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
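
/*
 * example of the free list built above with SCRUB_BIOS_PER_SCTX == 64:
 * bios[0]->next_free == 1, ..., bios[62]->next_free == 63 and
 * bios[63]->next_free == -1, with first_free == 0, i.e. a singly linked
 * chain of array indices terminated by -1.
 */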

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the bit that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
							&ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);

		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent. so make iterate_extent_inodes
		 * stop
		 */
		return 1;
	}

	return -EIO;
}
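
/*
 * summary of the EXTENT_DAMAGED round trip above: the bit is set on the
 * range, the page is read through the regular readpage path from the
 * known-bad mirror, and the on-the-fly repair code clears the bit when it
 * rewrites the sector; testing the bit afterwards therefore tells whether
 * the correction succeeded.
 */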

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
		WARN_ON(sctx->is_dev_replace);

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&sctx->dev_root->
							fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from. scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}

	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
				      &mapped_length, &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);
}

static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != btrfs_stack_header_generation(h)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}
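
/*
 * layout note for the loop above: in metadata blocks the first
 * BTRFS_CSUM_SIZE bytes of page 0 hold the on-disk checksum itself, so
 * the crc starts right behind them; all following pages are summed in
 * full PAGE_SIZE chunks.
 */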

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		if (!page_bad->dev->bdev) {
			printk_ratelimited(KERN_WARNING
				"btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_NOFS);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS,
						 wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}
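
/*
 * note: the current write bio is flushed early whenever the next page is
 * not both physically and logically adjacent to the pages already
 * collected, so every submitted bio covers one contiguous run on the
 * dev-replace target device.
 */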

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver, which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(WRITE, sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	sbio->work.func = scrub_wr_bio_end_io_worker;
	btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->dev_root->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		++fail;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	WARN_ON(sctx->nodesize != sctx->leafsize);
	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->ref_count);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->ref_count)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);

	if (!sbio->bio->bi_bdev) {
		/*
		 * this case should not happen. If btrfs_map_block() is
		 * wrong, it could happen for dev-replace operations on
		 * missing devices when no mirrors are available, but in
		 * this case it should already fail the mount.
		 * This case is handled correctly (but _very_ slowly).
		 */
		printk_ratelimited(KERN_WARNING
			"btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
		bio_endio(sbio->bio, -EIO);
	} else {
		btrfsic_submit_bio(READ, sbio->bio);
	}
}

static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS,
						 sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen) {
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * if the block has a checksum error, it is written out via
		 * the repair mechanism in the dev-replace case; otherwise
		 * it is written out here directly in the dev-replace case
		 */
		if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}
}

static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
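
/*
 * worked example for the lookup above, assuming 4k sectors: a sum entry
 * with bytenr == 1M and len == 32k covers 8 sectors; a query for
 * logical == 1M + 12k yields index == 3, and the entry is freed once its
 * last sector (index == 7) has been consumed.
 */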
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		WARN_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6)) {
		if (num >= nr_data_stripes(map)) {
			/* parity stripes are not scrubbed here */
			return 0;
		}
	}

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for extent tree and csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);
	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	logic_end = logical + increment * nstripes;
	ret = 0;
	while (logical < logic_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
					   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->leafsize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       key.objectid, logical);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root, logical,
						       logical + map->stripe_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);
			if (ret)
				goto out;

			scrub_free_csums(sctx);
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logical += increment;
				physical += map->stripe_len;

				if (logical < key.objectid + bytes)
					goto again;

				if (logical >= logic_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
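/*
 * Map a device extent back to its chunk and scrub the stripe of that chunk
 * which lives at @dev_offset on @scrub_dev.
 */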
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
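/*
 * Walk all dev extents of @scrub_dev in the device tree (commit root) and
 * scrub the corresponding chunks one by one. Between two chunks, all
 * in-flight bios are flushed and waited for; this is also the point where
 * a requested pause or cancel takes effect.
 */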
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);

		mutex_lock(&fs_info->scrub_lock);
		while (atomic_read(&fs_info->scrub_pause_req)) {
			mutex_unlock(&fs_info->scrub_lock);
			wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
			mutex_lock(&fs_info->scrub_lock);
		}
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
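/*
 * Scrub the superblock copies of @scrub_dev. Superblocks are not
 * referenced from the extent tree, so they are handled separately from
 * scrub_enumerate_chunks(); the last committed transaction generation is
 * used as the expected generation.
 */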
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
					   &fs_info->generic_worker);
		else
			btrfs_init_workers(&fs_info->scrub_workers, "scrub",
					   fs_info->thread_pool_size,
					   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
				   "scrubwrc",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_wr_completion_workers.idle_thresh = 2;
		ret = btrfs_start_workers(
				&fs_info->scrub_wr_completion_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
				   &fs_info->generic_worker);
		ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_stop_workers(&fs_info->scrub_workers);
		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
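/*
 * Main entry point, used by both the scrub ioctl and dev-replace.
 * Illustrative call only (the real callers are btrfs_ioctl_scrub() and the
 * dev-replace code); a plain scrub over the whole device would look like:
 *
 *	ret = btrfs_scrub_dev(fs_info, devid, 0, (u64)-1, &progress, 0, 0);
 */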
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
		pr_err("btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       fs_info->chunk_root->leafsize);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		pr_err("btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		pr_err("btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails\n",
		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		ret = scrub_supers(sctx, dev);
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);

	return ret;
}
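/*
 * Pause/continue protocol: a waiter bumps scrub_pause_req and then waits
 * until every running scrub has parked itself, i.e. scrubs_paused equals
 * scrubs_running. The scrub loops check scrub_pause_req at safe points and
 * block on scrub_pause_wait until btrfs_scrub_continue() drops the request
 * counter again.
 */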
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
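/*
 * Cancel all running scrubs. The request is advertised through
 * scrub_cancel_req; the scrub loops notice it and abort, and this function
 * waits until scrubs_running drops to zero.
 */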
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
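/*
 * For dev-replace, reads are serviced from a good mirror instead of the
 * possibly failing source device: map the logical extent and redirect the
 * physical address, device and mirror number to stripe 0 of the mapping.
 */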
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
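/*
 * Nodatacow data has no checksums, so during dev-replace such extents are
 * copied through the page cache instead of being verified: queue a work
 * item that copies the range at @logical to @physical_for_dev_replace on
 * the target device.
 */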
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	nocow_ctx->work.func = copy_nocow_pages_worker;
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_worker(&fs_info->scrub_nocow_workers,
			   &nocow_ctx->work);

	return 0;
}
static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1
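/*
 * Worker for the nocow copy path: look up all inodes referencing the
 * logical range (under a joined transaction, so the backrefs stay stable),
 * then copy the pages of the first usable inode to the replace target.
 * COPY_COMPLETE from copy_nocow_pages_for_inode() signals that the whole
 * range was written and the remaining inodes can be skipped.
 */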
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
			logical, physical_for_dev_replace, len, mirror_num,
			ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans, root);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}
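/*
 * Copy the pages backing one inode's view of the nocow range to the
 * replace target. The inode is locked and in-flight direct I/O is drained
 * first; if an ordered extent shows up, or the extent mapping no longer
 * covers the logical range, the caller moves on to the next inode.
 */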
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct btrfs_ordered_extent *ordered;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 len = nocow_ctx->len;
	u64 lockstart = offset, lockend = offset + len - 1;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > nocow_ctx->logical ||
	    em->block_start + em->block_len < nocow_ctx->logical + len) {
		free_extent_map(em);
		goto out_unlock;
	}
	free_extent_map(em);

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			pr_err("find_or_create_page() failed\n");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page_nolock(io_tree, page,
							   btrfs_get_extent,
							   nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless, because it may be
			 * an old one; the new data may have been written
			 * into a new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}
		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			goto out;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}
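/*
 * Synchronously write one page to @physical_for_dev_replace on the replace
 * target device, waiting for the bio to complete before returning.
 */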
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;
	DECLARE_COMPLETION_ONSTACK(compl);

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_private = &compl;
	bio->bi_end_io = scrub_complete_bio_end_io;
	bio->bi_size = 0;
	bio->bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}
	btrfsic_submit_bio(WRITE_SYNC, bio);
	wait_for_completion(&compl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}