/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ordered-data.h"
#include "transaction.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
struct scrub_page {
	struct scrub_block *sblock;
	struct btrfs_device *dev;
	u64 flags;  /* extent flags */
	u64 physical_for_dev_replace;
	unsigned int mirror_num:8;
	unsigned int have_csum:1;
	unsigned int io_error:1;
	u8 csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	struct scrub_ctx *sctx;
	struct btrfs_device *dev;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	struct btrfs_work work;
};

struct scrub_block {
	struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	atomic_t outstanding_pages;
	atomic_t ref_count; /* free mem on transition to zero */
	struct scrub_ctx *sctx;
	unsigned int header_error:1;
	unsigned int checksum_error:1;
	unsigned int no_io_error_seen:1;
	unsigned int generation_error:1; /* also sets header_error */
};

struct scrub_wr_ctx {
	struct scrub_bio *wr_curr_bio;
	struct btrfs_device *tgtdev;
	int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t flush_all_writes;
	struct mutex wr_lock;
};

struct scrub_ctx {
	struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root *dev_root;
	atomic_t bios_in_flight;
	atomic_t workers_pending;
	spinlock_t list_lock;
	wait_queue_head_t list_wait;
	struct list_head csum_list;
	int pages_per_rd_bio;
	struct scrub_wr_ctx wr_ctx;
	struct btrfs_scrub_progress stat;
	spinlock_t stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx *sctx;
	struct btrfs_device *dev;
	struct btrfs_root *root;
	struct btrfs_work work;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx *sctx;
	u64 physical_for_dev_replace;
	struct btrfs_work work;
};

struct scrub_warning {
	struct btrfs_path *path;
	u64 extent_item_size;
	struct btrfs_device *dev;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}
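
/* drop any checksums still queued on the scrub context's csum_list */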
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
	}
}
static noinline_for_stack
void scrub_free_ctx(struct scrub_ctx *sctx)
{
	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];
	}

	scrub_free_csums(sctx);
}
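
/*
 * allocate and initialize a scrub context for one device: the fixed array of
 * scrub_bios, the statistics locks and the dev-replace write context are all
 * set up here
 */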
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 */
	pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
				 bio_get_nr_vecs(dev->bdev));
	pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		sctx->bios[i] = sbio;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	scrub_free_ctx(sctx);

	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
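
/*
 * callback for iterate_extent_inodes(): resolve one inode that references the
 * corrupted extent to its file paths and print one warning line per path
 */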
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	btrfs_release_path(swarn->path);

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	ret = PTR_ERR(ipath);

	ret = paths_from_inode(inum, ipath);

	/*
	 * we deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	const int bufsize = 4096;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	btrfs_release_path(path);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
					      &ref_root, &ref_level);
		printk_in_rcu(KERN_WARNING
			      "btrfs: %s at logical %llu on dev %s, "
			      "sector %llu: metadata %s (level %d) in tree "
			      "%llu\n", errstr, swarn.logical,
			      rcu_str_deref(dev->name),
			      (unsigned long long)swarn.sector,
			      ref_level ? "node" : "leaf",
			      ret < 0 ? -1 : ref_level,
			      ret < 0 ? -1 : ref_root);
	}

	iterate_extent_inodes(fs_info, found_key.objectid,
			      scrub_print_warning_inode, &swarn);

	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}
574 static int scrub_fixup_readpage(u64 inum
, u64 offset
, u64 root
, void *fixup_ctx
)
576 struct page
*page
= NULL
;
578 struct scrub_fixup_nodatasum
*fixup
= fixup_ctx
;
581 struct btrfs_key key
;
582 struct inode
*inode
= NULL
;
583 u64 end
= offset
+ PAGE_SIZE
- 1;
584 struct btrfs_root
*local_root
;
587 key
.type
= BTRFS_ROOT_ITEM_KEY
;
588 key
.offset
= (u64
)-1;
589 local_root
= btrfs_read_fs_root_no_name(fixup
->root
->fs_info
, &key
);
590 if (IS_ERR(local_root
))
591 return PTR_ERR(local_root
);
593 key
.type
= BTRFS_INODE_ITEM_KEY
;
596 inode
= btrfs_iget(fixup
->root
->fs_info
->sb
, &key
, local_root
, NULL
);
598 return PTR_ERR(inode
);
600 index
= offset
>> PAGE_CACHE_SHIFT
;
602 page
= find_or_create_page(inode
->i_mapping
, index
, GFP_NOFS
);
608 if (PageUptodate(page
)) {
609 struct btrfs_fs_info
*fs_info
;
610 if (PageDirty(page
)) {
612 * we need to write the data to the defect sector. the
613 * data that was in that sector is not in memory,
614 * because the page was modified. we must not write the
615 * modified page to that sector.
617 * TODO: what could be done here: wait for the delalloc
618 * runner to write out that page (might involve
619 * COW) and see whether the sector is still
620 * referenced afterwards.
622 * For the meantime, we'll treat this error
623 * incorrectable, although there is a chance that a
624 * later scrub will find the bad sector again and that
625 * there's no dirty page in memory, then.
630 fs_info
= BTRFS_I(inode
)->root
->fs_info
;
631 ret
= repair_io_failure(fs_info
, offset
, PAGE_SIZE
,
632 fixup
->logical
, page
,
638 * we need to get good data first. the general readpage path
639 * will call repair_io_failure for us, we just have to make
640 * sure we read the bad mirror.
642 ret
= set_extent_bits(&BTRFS_I(inode
)->io_tree
, offset
, end
,
643 EXTENT_DAMAGED
, GFP_NOFS
);
645 /* set_extent_bits should give proper error */
652 ret
= extent_read_full_page(&BTRFS_I(inode
)->io_tree
, page
,
655 wait_on_page_locked(page
);
657 corrected
= !test_range_bit(&BTRFS_I(inode
)->io_tree
, offset
,
658 end
, EXTENT_DAMAGED
, 0, NULL
);
660 clear_extent_bits(&BTRFS_I(inode
)->io_tree
, offset
, end
,
661 EXTENT_DAMAGED
, GFP_NOFS
);
673 if (ret
== 0 && corrected
) {
675 * we only need to call readpage for one of the inodes belonging
676 * to this extent. so make iterate_extent_inodes stop
684 static void scrub_fixup_nodatasum(struct btrfs_work
*work
)
687 struct scrub_fixup_nodatasum
*fixup
;
688 struct scrub_ctx
*sctx
;
689 struct btrfs_trans_handle
*trans
= NULL
;
690 struct btrfs_fs_info
*fs_info
;
691 struct btrfs_path
*path
;
692 int uncorrectable
= 0;
694 fixup
= container_of(work
, struct scrub_fixup_nodatasum
, work
);
696 fs_info
= fixup
->root
->fs_info
;
698 path
= btrfs_alloc_path();
700 spin_lock(&sctx
->stat_lock
);
701 ++sctx
->stat
.malloc_errors
;
702 spin_unlock(&sctx
->stat_lock
);
707 trans
= btrfs_join_transaction(fixup
->root
);
714 * the idea is to trigger a regular read through the standard path. we
715 * read a page from the (failed) logical address by specifying the
716 * corresponding copynum of the failed sector. thus, that readpage is
718 * that is the point where on-the-fly error correction will kick in
719 * (once it's finished) and rewrite the failed sector if a good copy
722 ret
= iterate_inodes_from_logical(fixup
->logical
, fixup
->root
->fs_info
,
723 path
, scrub_fixup_readpage
,
731 spin_lock(&sctx
->stat_lock
);
732 ++sctx
->stat
.corrected_errors
;
733 spin_unlock(&sctx
->stat_lock
);
736 if (trans
&& !IS_ERR(trans
))
737 btrfs_end_transaction(trans
, fixup
->root
);
739 spin_lock(&sctx
->stat_lock
);
740 ++sctx
->stat
.uncorrectable_errors
;
741 spin_unlock(&sctx
->stat_lock
);
742 btrfs_dev_replace_stats_inc(
743 &sctx
->dev_root
->fs_info
->dev_replace
.
744 num_uncorrectable_read_errors
);
745 printk_ratelimited_in_rcu(KERN_ERR
746 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
747 (unsigned long long)fixup
->logical
,
748 rcu_str_deref(fixup
->dev
->name
));
751 btrfs_free_path(path
);
754 scrub_pending_trans_workers_dec(sctx
);
758 * scrub_handle_errored_block gets called when either verification of the
759 * pages failed or the bio failed to read, e.g. with EIO. In the latter
760 * case, this function handles all pages in the bio, even though only one
762 * The goal of this function is to repair the errored block by using the
763 * contents of one of the mirrors.
765 static int scrub_handle_errored_block(struct scrub_block
*sblock_to_check
)
767 struct scrub_ctx
*sctx
= sblock_to_check
->sctx
;
768 struct btrfs_device
*dev
;
769 struct btrfs_fs_info
*fs_info
;
773 unsigned int failed_mirror_index
;
774 unsigned int is_metadata
;
775 unsigned int have_csum
;
777 struct scrub_block
*sblocks_for_recheck
; /* holds one for each mirror */
778 struct scrub_block
*sblock_bad
;
783 static DEFINE_RATELIMIT_STATE(_rs
, DEFAULT_RATELIMIT_INTERVAL
,
784 DEFAULT_RATELIMIT_BURST
);
786 BUG_ON(sblock_to_check
->page_count
< 1);
787 fs_info
= sctx
->dev_root
->fs_info
;
788 if (sblock_to_check
->pagev
[0]->flags
& BTRFS_EXTENT_FLAG_SUPER
) {
790 * if we find an error in a super block, we just report it.
791 * They will get written with the next transaction commit
794 spin_lock(&sctx
->stat_lock
);
795 ++sctx
->stat
.super_errors
;
796 spin_unlock(&sctx
->stat_lock
);
799 length
= sblock_to_check
->page_count
* PAGE_SIZE
;
800 logical
= sblock_to_check
->pagev
[0]->logical
;
801 generation
= sblock_to_check
->pagev
[0]->generation
;
802 BUG_ON(sblock_to_check
->pagev
[0]->mirror_num
< 1);
803 failed_mirror_index
= sblock_to_check
->pagev
[0]->mirror_num
- 1;
804 is_metadata
= !(sblock_to_check
->pagev
[0]->flags
&
805 BTRFS_EXTENT_FLAG_DATA
);
806 have_csum
= sblock_to_check
->pagev
[0]->have_csum
;
807 csum
= sblock_to_check
->pagev
[0]->csum
;
808 dev
= sblock_to_check
->pagev
[0]->dev
;
810 if (sctx
->is_dev_replace
&& !is_metadata
&& !have_csum
) {
811 sblocks_for_recheck
= NULL
;
816 * read all mirrors one after the other. This includes to
817 * re-read the extent or metadata block that failed (that was
818 * the cause that this fixup code is called) another time,
819 * page by page this time in order to know which pages
820 * caused I/O errors and which ones are good (for all mirrors).
821 * It is the goal to handle the situation when more than one
822 * mirror contains I/O errors, but the errors do not
823 * overlap, i.e. the data can be repaired by selecting the
824 * pages from those mirrors without I/O error on the
825 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
826 * would be that mirror #1 has an I/O error on the first page,
827 * the second page is good, and mirror #2 has an I/O error on
828 * the second page, but the first page is good.
829 * Then the first page of the first mirror can be repaired by
830 * taking the first page of the second mirror, and the
831 * second page of the second mirror can be repaired by
832 * copying the contents of the 2nd page of the 1st mirror.
833 * One more note: if the pages of one mirror contain I/O
834 * errors, the checksum cannot be verified. In order to get
835 * the best data for repairing, the first attempt is to find
836 * a mirror without I/O errors and with a validated checksum.
837 * Only if this is not possible, the pages are picked from
838 * mirrors with I/O errors without considering the checksum.
839 * If the latter is the case, at the end, the checksum of the
840 * repaired area is verified in order to correctly maintain
844 sblocks_for_recheck
= kzalloc(BTRFS_MAX_MIRRORS
*
845 sizeof(*sblocks_for_recheck
),
847 if (!sblocks_for_recheck
) {
848 spin_lock(&sctx
->stat_lock
);
849 sctx
->stat
.malloc_errors
++;
850 sctx
->stat
.read_errors
++;
851 sctx
->stat
.uncorrectable_errors
++;
852 spin_unlock(&sctx
->stat_lock
);
853 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
857 /* setup the context, map the logical blocks and alloc the pages */
858 ret
= scrub_setup_recheck_block(sctx
, fs_info
, sblock_to_check
, length
,
859 logical
, sblocks_for_recheck
);
861 spin_lock(&sctx
->stat_lock
);
862 sctx
->stat
.read_errors
++;
863 sctx
->stat
.uncorrectable_errors
++;
864 spin_unlock(&sctx
->stat_lock
);
865 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
868 BUG_ON(failed_mirror_index
>= BTRFS_MAX_MIRRORS
);
869 sblock_bad
= sblocks_for_recheck
+ failed_mirror_index
;
871 /* build and submit the bios for the failed mirror, check checksums */
872 scrub_recheck_block(fs_info
, sblock_bad
, is_metadata
, have_csum
,
873 csum
, generation
, sctx
->csum_size
);
875 if (!sblock_bad
->header_error
&& !sblock_bad
->checksum_error
&&
876 sblock_bad
->no_io_error_seen
) {
878 * the error disappeared after reading page by page, or
879 * the area was part of a huge bio and other parts of the
880 * bio caused I/O errors, or the block layer merged several
881 * read requests into one and the error is caused by a
882 * different bio (usually one of the two latter cases is
885 spin_lock(&sctx
->stat_lock
);
886 sctx
->stat
.unverified_errors
++;
887 spin_unlock(&sctx
->stat_lock
);
889 if (sctx
->is_dev_replace
)
890 scrub_write_block_to_dev_replace(sblock_bad
);
894 if (!sblock_bad
->no_io_error_seen
) {
895 spin_lock(&sctx
->stat_lock
);
896 sctx
->stat
.read_errors
++;
897 spin_unlock(&sctx
->stat_lock
);
898 if (__ratelimit(&_rs
))
899 scrub_print_warning("i/o error", sblock_to_check
);
900 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
901 } else if (sblock_bad
->checksum_error
) {
902 spin_lock(&sctx
->stat_lock
);
903 sctx
->stat
.csum_errors
++;
904 spin_unlock(&sctx
->stat_lock
);
905 if (__ratelimit(&_rs
))
906 scrub_print_warning("checksum error", sblock_to_check
);
907 btrfs_dev_stat_inc_and_print(dev
,
908 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
909 } else if (sblock_bad
->header_error
) {
910 spin_lock(&sctx
->stat_lock
);
911 sctx
->stat
.verify_errors
++;
912 spin_unlock(&sctx
->stat_lock
);
913 if (__ratelimit(&_rs
))
914 scrub_print_warning("checksum/header error",
916 if (sblock_bad
->generation_error
)
917 btrfs_dev_stat_inc_and_print(dev
,
918 BTRFS_DEV_STAT_GENERATION_ERRS
);
920 btrfs_dev_stat_inc_and_print(dev
,
921 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
924 if (sctx
->readonly
&& !sctx
->is_dev_replace
)
925 goto did_not_correct_error
;
927 if (!is_metadata
&& !have_csum
) {
928 struct scrub_fixup_nodatasum
*fixup_nodatasum
;
931 WARN_ON(sctx
->is_dev_replace
);
934 * !is_metadata and !have_csum, this means that the data
935 * might not be COW'ed, that it might be modified
936 * concurrently. The general strategy to work on the
937 * commit root does not help in the case when COW is not
940 fixup_nodatasum
= kzalloc(sizeof(*fixup_nodatasum
), GFP_NOFS
);
941 if (!fixup_nodatasum
)
942 goto did_not_correct_error
;
943 fixup_nodatasum
->sctx
= sctx
;
944 fixup_nodatasum
->dev
= dev
;
945 fixup_nodatasum
->logical
= logical
;
946 fixup_nodatasum
->root
= fs_info
->extent_root
;
947 fixup_nodatasum
->mirror_num
= failed_mirror_index
+ 1;
948 scrub_pending_trans_workers_inc(sctx
);
949 fixup_nodatasum
->work
.func
= scrub_fixup_nodatasum
;
950 btrfs_queue_worker(&fs_info
->scrub_workers
,
951 &fixup_nodatasum
->work
);
956 * now build and submit the bios for the other mirrors, check
958 * First try to pick the mirror which is completely without I/O
959 * errors and also does not have a checksum error.
960 * If one is found, and if a checksum is present, the full block
961 * that is known to contain an error is rewritten. Afterwards
962 * the block is known to be corrected.
963 * If a mirror is found which is completely correct, and no
964 * checksum is present, only those pages are rewritten that had
965 * an I/O error in the block to be repaired, since it cannot be
966 * determined, which copy of the other pages is better (and it
967 * could happen otherwise that a correct page would be
968 * overwritten by a bad one).
970 for (mirror_index
= 0;
971 mirror_index
< BTRFS_MAX_MIRRORS
&&
972 sblocks_for_recheck
[mirror_index
].page_count
> 0;
974 struct scrub_block
*sblock_other
;
976 if (mirror_index
== failed_mirror_index
)
978 sblock_other
= sblocks_for_recheck
+ mirror_index
;
980 /* build and submit the bios, check checksums */
981 scrub_recheck_block(fs_info
, sblock_other
, is_metadata
,
982 have_csum
, csum
, generation
,
985 if (!sblock_other
->header_error
&&
986 !sblock_other
->checksum_error
&&
987 sblock_other
->no_io_error_seen
) {
988 if (sctx
->is_dev_replace
) {
989 scrub_write_block_to_dev_replace(sblock_other
);
991 int force_write
= is_metadata
|| have_csum
;
993 ret
= scrub_repair_block_from_good_copy(
994 sblock_bad
, sblock_other
,
998 goto corrected_error
;
1003 * for dev_replace, pick good pages and write to the target device.
1005 if (sctx
->is_dev_replace
) {
1007 for (page_num
= 0; page_num
< sblock_bad
->page_count
;
1012 for (mirror_index
= 0;
1013 mirror_index
< BTRFS_MAX_MIRRORS
&&
1014 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1016 struct scrub_block
*sblock_other
=
1017 sblocks_for_recheck
+ mirror_index
;
1018 struct scrub_page
*page_other
=
1019 sblock_other
->pagev
[page_num
];
1021 if (!page_other
->io_error
) {
1022 ret
= scrub_write_page_to_dev_replace(
1023 sblock_other
, page_num
);
1025 /* succeeded for this page */
1029 btrfs_dev_replace_stats_inc(
1031 fs_info
->dev_replace
.
1039 * did not find a mirror to fetch the page
1040 * from. scrub_write_page_to_dev_replace()
1041 * handles this case (page->io_error), by
1042 * filling the block with zeros before
1043 * submitting the write request
1046 ret
= scrub_write_page_to_dev_replace(
1047 sblock_bad
, page_num
);
1049 btrfs_dev_replace_stats_inc(
1050 &sctx
->dev_root
->fs_info
->
1051 dev_replace
.num_write_errors
);
1059 * for regular scrub, repair those pages that are errored.
1060 * In case of I/O errors in the area that is supposed to be
1061 * repaired, continue by picking good copies of those pages.
1062 * Select the good pages from mirrors to rewrite bad pages from
1063 * the area to fix. Afterwards verify the checksum of the block
1064 * that is supposed to be repaired. This verification step is
1065 * only done for the purpose of statistic counting and for the
1066 * final scrub report, whether errors remain.
1067 * A perfect algorithm could make use of the checksum and try
1068 * all possible combinations of pages from the different mirrors
1069 * until the checksum verification succeeds. For example, when
1070 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1071 * of mirror #2 is readable but the final checksum test fails,
1072 * then the 2nd page of mirror #3 could be tried, whether now
1073 * the final checksum succeedes. But this would be a rare
1074 * exception and is therefore not implemented. At least it is
1075 * avoided that the good copy is overwritten.
1076 * A more useful improvement would be to pick the sectors
1077 * without I/O error based on sector sizes (512 bytes on legacy
1078 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1079 * mirror could be repaired by taking 512 byte of a different
1080 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1081 * area are unreadable.
1084 /* can only fix I/O errors from here on */
1085 if (sblock_bad
->no_io_error_seen
)
1086 goto did_not_correct_error
;
1089 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1090 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1092 if (!page_bad
->io_error
)
1095 for (mirror_index
= 0;
1096 mirror_index
< BTRFS_MAX_MIRRORS
&&
1097 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1099 struct scrub_block
*sblock_other
= sblocks_for_recheck
+
1101 struct scrub_page
*page_other
= sblock_other
->pagev
[
1104 if (!page_other
->io_error
) {
1105 ret
= scrub_repair_page_from_good_copy(
1106 sblock_bad
, sblock_other
, page_num
, 0);
1108 page_bad
->io_error
= 0;
1109 break; /* succeeded for this page */
1114 if (page_bad
->io_error
) {
1115 /* did not find a mirror to copy the page from */
1121 if (is_metadata
|| have_csum
) {
1123 * need to verify the checksum now that all
1124 * sectors on disk are repaired (the write
1125 * request for data to be repaired is on its way).
1126 * Just be lazy and use scrub_recheck_block()
1127 * which re-reads the data before the checksum
1128 * is verified, but most likely the data comes out
1129 * of the page cache.
1131 scrub_recheck_block(fs_info
, sblock_bad
,
1132 is_metadata
, have_csum
, csum
,
1133 generation
, sctx
->csum_size
);
1134 if (!sblock_bad
->header_error
&&
1135 !sblock_bad
->checksum_error
&&
1136 sblock_bad
->no_io_error_seen
)
1137 goto corrected_error
;
1139 goto did_not_correct_error
;
1142 spin_lock(&sctx
->stat_lock
);
1143 sctx
->stat
.corrected_errors
++;
1144 spin_unlock(&sctx
->stat_lock
);
1145 printk_ratelimited_in_rcu(KERN_ERR
1146 "btrfs: fixed up error at logical %llu on dev %s\n",
1147 (unsigned long long)logical
,
1148 rcu_str_deref(dev
->name
));
1151 did_not_correct_error
:
1152 spin_lock(&sctx
->stat_lock
);
1153 sctx
->stat
.uncorrectable_errors
++;
1154 spin_unlock(&sctx
->stat_lock
);
1155 printk_ratelimited_in_rcu(KERN_ERR
1156 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
1157 (unsigned long long)logical
,
1158 rcu_str_deref(dev
->name
));
1162 if (sblocks_for_recheck
) {
1163 for (mirror_index
= 0; mirror_index
< BTRFS_MAX_MIRRORS
;
1165 struct scrub_block
*sblock
= sblocks_for_recheck
+
1169 for (page_index
= 0; page_index
< sblock
->page_count
;
1171 sblock
->pagev
[page_index
]->sblock
= NULL
;
1172 scrub_page_put(sblock
->pagev
[page_index
]);
1175 kfree(sblocks_for_recheck
);
1181 static int scrub_setup_recheck_block(struct scrub_ctx
*sctx
,
1182 struct btrfs_fs_info
*fs_info
,
1183 struct scrub_block
*original_sblock
,
1184 u64 length
, u64 logical
,
1185 struct scrub_block
*sblocks_for_recheck
)
1192 * note: the two members ref_count and outstanding_pages
1193 * are not used (and not set) in the blocks that are used for
1194 * the recheck procedure
1198 while (length
> 0) {
1199 u64 sublen
= min_t(u64
, length
, PAGE_SIZE
);
1200 u64 mapped_length
= sublen
;
1201 struct btrfs_bio
*bbio
= NULL
;
1204 * with a length of PAGE_SIZE, each returned stripe
1205 * represents one mirror
1207 ret
= btrfs_map_block(fs_info
, REQ_GET_READ_MIRRORS
, logical
,
1208 &mapped_length
, &bbio
, 0);
1209 if (ret
|| !bbio
|| mapped_length
< sublen
) {
1214 BUG_ON(page_index
>= SCRUB_PAGES_PER_RD_BIO
);
1215 for (mirror_index
= 0; mirror_index
< (int)bbio
->num_stripes
;
1217 struct scrub_block
*sblock
;
1218 struct scrub_page
*page
;
1220 if (mirror_index
>= BTRFS_MAX_MIRRORS
)
1223 sblock
= sblocks_for_recheck
+ mirror_index
;
1224 sblock
->sctx
= sctx
;
1225 page
= kzalloc(sizeof(*page
), GFP_NOFS
);
1228 spin_lock(&sctx
->stat_lock
);
1229 sctx
->stat
.malloc_errors
++;
1230 spin_unlock(&sctx
->stat_lock
);
1234 scrub_page_get(page
);
1235 sblock
->pagev
[page_index
] = page
;
1236 page
->logical
= logical
;
1237 page
->physical
= bbio
->stripes
[mirror_index
].physical
;
1238 BUG_ON(page_index
>= original_sblock
->page_count
);
1239 page
->physical_for_dev_replace
=
1240 original_sblock
->pagev
[page_index
]->
1241 physical_for_dev_replace
;
1242 /* for missing devices, dev->bdev is NULL */
1243 page
->dev
= bbio
->stripes
[mirror_index
].dev
;
1244 page
->mirror_num
= mirror_index
+ 1;
1245 sblock
->page_count
++;
1246 page
->page
= alloc_page(GFP_NOFS
);
1260 * this function will check the on disk data for checksum errors, header
1261 * errors and read I/O errors. If any I/O errors happen, the exact pages
1262 * which are errored are marked as being bad. The goal is to enable scrub
1263 * to take those pages that are not errored from all the mirrors so that
1264 * the pages that are errored in the just handled mirror can be repaired.
1266 static void scrub_recheck_block(struct btrfs_fs_info
*fs_info
,
1267 struct scrub_block
*sblock
, int is_metadata
,
1268 int have_csum
, u8
*csum
, u64 generation
,
1273 sblock
->no_io_error_seen
= 1;
1274 sblock
->header_error
= 0;
1275 sblock
->checksum_error
= 0;
1277 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1279 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1280 DECLARE_COMPLETION_ONSTACK(complete
);
1282 if (page
->dev
->bdev
== NULL
) {
1284 sblock
->no_io_error_seen
= 0;
1288 WARN_ON(!page
->page
);
1289 bio
= bio_alloc(GFP_NOFS
, 1);
1292 sblock
->no_io_error_seen
= 0;
1295 bio
->bi_bdev
= page
->dev
->bdev
;
1296 bio
->bi_sector
= page
->physical
>> 9;
1297 bio
->bi_end_io
= scrub_complete_bio_end_io
;
1298 bio
->bi_private
= &complete
;
1300 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1301 btrfsic_submit_bio(READ
, bio
);
1303 /* this will also unplug the queue */
1304 wait_for_completion(&complete
);
1306 page
->io_error
= !test_bit(BIO_UPTODATE
, &bio
->bi_flags
);
1307 if (!test_bit(BIO_UPTODATE
, &bio
->bi_flags
))
1308 sblock
->no_io_error_seen
= 0;
1312 if (sblock
->no_io_error_seen
)
1313 scrub_recheck_block_checksum(fs_info
, sblock
, is_metadata
,
1314 have_csum
, csum
, generation
,
1320 static void scrub_recheck_block_checksum(struct btrfs_fs_info
*fs_info
,
1321 struct scrub_block
*sblock
,
1322 int is_metadata
, int have_csum
,
1323 const u8
*csum
, u64 generation
,
1327 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1329 struct btrfs_root
*root
= fs_info
->extent_root
;
1330 void *mapped_buffer
;
1332 WARN_ON(!sblock
->pagev
[0]->page
);
1334 struct btrfs_header
*h
;
1336 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1337 h
= (struct btrfs_header
*)mapped_buffer
;
1339 if (sblock
->pagev
[0]->logical
!= le64_to_cpu(h
->bytenr
) ||
1340 memcmp(h
->fsid
, fs_info
->fsid
, BTRFS_UUID_SIZE
) ||
1341 memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1343 sblock
->header_error
= 1;
1344 } else if (generation
!= le64_to_cpu(h
->generation
)) {
1345 sblock
->header_error
= 1;
1346 sblock
->generation_error
= 1;
1353 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1356 for (page_num
= 0;;) {
1357 if (page_num
== 0 && is_metadata
)
1358 crc
= btrfs_csum_data(root
,
1359 ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
,
1360 crc
, PAGE_SIZE
- BTRFS_CSUM_SIZE
);
1362 crc
= btrfs_csum_data(root
, mapped_buffer
, crc
,
1365 kunmap_atomic(mapped_buffer
);
1367 if (page_num
>= sblock
->page_count
)
1369 WARN_ON(!sblock
->pagev
[page_num
]->page
);
1371 mapped_buffer
= kmap_atomic(sblock
->pagev
[page_num
]->page
);
1374 btrfs_csum_final(crc
, calculated_csum
);
1375 if (memcmp(calculated_csum
, csum
, csum_size
))
1376 sblock
->checksum_error
= 1;
1379 static void scrub_complete_bio_end_io(struct bio
*bio
, int err
)
1381 complete((struct completion
*)bio
->bi_private
);
1384 static int scrub_repair_block_from_good_copy(struct scrub_block
*sblock_bad
,
1385 struct scrub_block
*sblock_good
,
1391 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1394 ret_sub
= scrub_repair_page_from_good_copy(sblock_bad
,
1405 static int scrub_repair_page_from_good_copy(struct scrub_block
*sblock_bad
,
1406 struct scrub_block
*sblock_good
,
1407 int page_num
, int force_write
)
1409 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1410 struct scrub_page
*page_good
= sblock_good
->pagev
[page_num
];
1412 BUG_ON(page_bad
->page
== NULL
);
1413 BUG_ON(page_good
->page
== NULL
);
1414 if (force_write
|| sblock_bad
->header_error
||
1415 sblock_bad
->checksum_error
|| page_bad
->io_error
) {
1418 DECLARE_COMPLETION_ONSTACK(complete
);
1420 if (!page_bad
->dev
->bdev
) {
1421 printk_ratelimited(KERN_WARNING
1422 "btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
1426 bio
= bio_alloc(GFP_NOFS
, 1);
1429 bio
->bi_bdev
= page_bad
->dev
->bdev
;
1430 bio
->bi_sector
= page_bad
->physical
>> 9;
1431 bio
->bi_end_io
= scrub_complete_bio_end_io
;
1432 bio
->bi_private
= &complete
;
1434 ret
= bio_add_page(bio
, page_good
->page
, PAGE_SIZE
, 0);
1435 if (PAGE_SIZE
!= ret
) {
1439 btrfsic_submit_bio(WRITE
, bio
);
1441 /* this will also unplug the queue */
1442 wait_for_completion(&complete
);
1443 if (!bio_flagged(bio
, BIO_UPTODATE
)) {
1444 btrfs_dev_stat_inc_and_print(page_bad
->dev
,
1445 BTRFS_DEV_STAT_WRITE_ERRS
);
1446 btrfs_dev_replace_stats_inc(
1447 &sblock_bad
->sctx
->dev_root
->fs_info
->
1448 dev_replace
.num_write_errors
);
1458 static void scrub_write_block_to_dev_replace(struct scrub_block
*sblock
)
1462 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1465 ret
= scrub_write_page_to_dev_replace(sblock
, page_num
);
1467 btrfs_dev_replace_stats_inc(
1468 &sblock
->sctx
->dev_root
->fs_info
->dev_replace
.
1473 static int scrub_write_page_to_dev_replace(struct scrub_block
*sblock
,
1476 struct scrub_page
*spage
= sblock
->pagev
[page_num
];
1478 BUG_ON(spage
->page
== NULL
);
1479 if (spage
->io_error
) {
1480 void *mapped_buffer
= kmap_atomic(spage
->page
);
1482 memset(mapped_buffer
, 0, PAGE_CACHE_SIZE
);
1483 flush_dcache_page(spage
->page
);
1484 kunmap_atomic(mapped_buffer
);
1486 return scrub_add_page_to_wr_bio(sblock
->sctx
, spage
);
1489 static int scrub_add_page_to_wr_bio(struct scrub_ctx
*sctx
,
1490 struct scrub_page
*spage
)
1492 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1493 struct scrub_bio
*sbio
;
1496 mutex_lock(&wr_ctx
->wr_lock
);
1498 if (!wr_ctx
->wr_curr_bio
) {
1499 wr_ctx
->wr_curr_bio
= kzalloc(sizeof(*wr_ctx
->wr_curr_bio
),
1501 if (!wr_ctx
->wr_curr_bio
) {
1502 mutex_unlock(&wr_ctx
->wr_lock
);
1505 wr_ctx
->wr_curr_bio
->sctx
= sctx
;
1506 wr_ctx
->wr_curr_bio
->page_count
= 0;
1508 sbio
= wr_ctx
->wr_curr_bio
;
1509 if (sbio
->page_count
== 0) {
1512 sbio
->physical
= spage
->physical_for_dev_replace
;
1513 sbio
->logical
= spage
->logical
;
1514 sbio
->dev
= wr_ctx
->tgtdev
;
1517 bio
= bio_alloc(GFP_NOFS
, wr_ctx
->pages_per_wr_bio
);
1519 mutex_unlock(&wr_ctx
->wr_lock
);
1525 bio
->bi_private
= sbio
;
1526 bio
->bi_end_io
= scrub_wr_bio_end_io
;
1527 bio
->bi_bdev
= sbio
->dev
->bdev
;
1528 bio
->bi_sector
= sbio
->physical
>> 9;
1530 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1531 spage
->physical_for_dev_replace
||
1532 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1534 scrub_wr_submit(sctx
);
1538 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1539 if (ret
!= PAGE_SIZE
) {
1540 if (sbio
->page_count
< 1) {
1543 mutex_unlock(&wr_ctx
->wr_lock
);
1546 scrub_wr_submit(sctx
);
1550 sbio
->pagev
[sbio
->page_count
] = spage
;
1551 scrub_page_get(spage
);
1553 if (sbio
->page_count
== wr_ctx
->pages_per_wr_bio
)
1554 scrub_wr_submit(sctx
);
1555 mutex_unlock(&wr_ctx
->wr_lock
);
1560 static void scrub_wr_submit(struct scrub_ctx
*sctx
)
1562 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1563 struct scrub_bio
*sbio
;
1565 if (!wr_ctx
->wr_curr_bio
)
1568 sbio
= wr_ctx
->wr_curr_bio
;
1569 wr_ctx
->wr_curr_bio
= NULL
;
1570 WARN_ON(!sbio
->bio
->bi_bdev
);
1571 scrub_pending_bio_inc(sctx
);
1572 /* process all writes in a single worker thread. Then the block layer
1573 * orders the requests before sending them to the driver which
1574 * doubled the write performance on spinning disks when measured
1576 btrfsic_submit_bio(WRITE
, sbio
->bio
);
1579 static void scrub_wr_bio_end_io(struct bio
*bio
, int err
)
1581 struct scrub_bio
*sbio
= bio
->bi_private
;
1582 struct btrfs_fs_info
*fs_info
= sbio
->dev
->dev_root
->fs_info
;
1587 sbio
->work
.func
= scrub_wr_bio_end_io_worker
;
1588 btrfs_queue_worker(&fs_info
->scrub_wr_completion_workers
, &sbio
->work
);
1591 static void scrub_wr_bio_end_io_worker(struct btrfs_work
*work
)
1593 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
1594 struct scrub_ctx
*sctx
= sbio
->sctx
;
1597 WARN_ON(sbio
->page_count
> SCRUB_PAGES_PER_WR_BIO
);
1599 struct btrfs_dev_replace
*dev_replace
=
1600 &sbio
->sctx
->dev_root
->fs_info
->dev_replace
;
1602 for (i
= 0; i
< sbio
->page_count
; i
++) {
1603 struct scrub_page
*spage
= sbio
->pagev
[i
];
1605 spage
->io_error
= 1;
1606 btrfs_dev_replace_stats_inc(&dev_replace
->
1611 for (i
= 0; i
< sbio
->page_count
; i
++)
1612 scrub_page_put(sbio
->pagev
[i
]);
1616 scrub_pending_bio_dec(sctx
);
1619 static int scrub_checksum(struct scrub_block
*sblock
)
1624 WARN_ON(sblock
->page_count
< 1);
1625 flags
= sblock
->pagev
[0]->flags
;
1627 if (flags
& BTRFS_EXTENT_FLAG_DATA
)
1628 ret
= scrub_checksum_data(sblock
);
1629 else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)
1630 ret
= scrub_checksum_tree_block(sblock
);
1631 else if (flags
& BTRFS_EXTENT_FLAG_SUPER
)
1632 (void)scrub_checksum_super(sblock
);
1636 scrub_handle_errored_block(sblock
);
1641 static int scrub_checksum_data(struct scrub_block
*sblock
)
1643 struct scrub_ctx
*sctx
= sblock
->sctx
;
1644 u8 csum
[BTRFS_CSUM_SIZE
];
1650 struct btrfs_root
*root
= sctx
->dev_root
;
1654 BUG_ON(sblock
->page_count
< 1);
1655 if (!sblock
->pagev
[0]->have_csum
)
1658 on_disk_csum
= sblock
->pagev
[0]->csum
;
1659 page
= sblock
->pagev
[0]->page
;
1660 buffer
= kmap_atomic(page
);
1662 len
= sctx
->sectorsize
;
1665 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1667 crc
= btrfs_csum_data(root
, buffer
, crc
, l
);
1668 kunmap_atomic(buffer
);
1673 BUG_ON(index
>= sblock
->page_count
);
1674 BUG_ON(!sblock
->pagev
[index
]->page
);
1675 page
= sblock
->pagev
[index
]->page
;
1676 buffer
= kmap_atomic(page
);
1679 btrfs_csum_final(crc
, csum
);
1680 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1686 static int scrub_checksum_tree_block(struct scrub_block
*sblock
)
1688 struct scrub_ctx
*sctx
= sblock
->sctx
;
1689 struct btrfs_header
*h
;
1690 struct btrfs_root
*root
= sctx
->dev_root
;
1691 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1692 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1693 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1695 void *mapped_buffer
;
1704 BUG_ON(sblock
->page_count
< 1);
1705 page
= sblock
->pagev
[0]->page
;
1706 mapped_buffer
= kmap_atomic(page
);
1707 h
= (struct btrfs_header
*)mapped_buffer
;
1708 memcpy(on_disk_csum
, h
->csum
, sctx
->csum_size
);
1711 * we don't use the getter functions here, as we
1712 * a) don't have an extent buffer and
1713 * b) the page is already kmapped
1716 if (sblock
->pagev
[0]->logical
!= le64_to_cpu(h
->bytenr
))
1719 if (sblock
->pagev
[0]->generation
!= le64_to_cpu(h
->generation
))
1722 if (memcmp(h
->fsid
, fs_info
->fsid
, BTRFS_UUID_SIZE
))
1725 if (memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1729 WARN_ON(sctx
->nodesize
!= sctx
->leafsize
);
1730 len
= sctx
->nodesize
- BTRFS_CSUM_SIZE
;
1731 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1732 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1735 u64 l
= min_t(u64
, len
, mapped_size
);
1737 crc
= btrfs_csum_data(root
, p
, crc
, l
);
1738 kunmap_atomic(mapped_buffer
);
1743 BUG_ON(index
>= sblock
->page_count
);
1744 BUG_ON(!sblock
->pagev
[index
]->page
);
1745 page
= sblock
->pagev
[index
]->page
;
1746 mapped_buffer
= kmap_atomic(page
);
1747 mapped_size
= PAGE_SIZE
;
1751 btrfs_csum_final(crc
, calculated_csum
);
1752 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1755 return fail
|| crc_fail
;
1758 static int scrub_checksum_super(struct scrub_block
*sblock
)
1760 struct btrfs_super_block
*s
;
1761 struct scrub_ctx
*sctx
= sblock
->sctx
;
1762 struct btrfs_root
*root
= sctx
->dev_root
;
1763 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1764 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1765 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1767 void *mapped_buffer
;
1776 BUG_ON(sblock
->page_count
< 1);
1777 page
= sblock
->pagev
[0]->page
;
1778 mapped_buffer
= kmap_atomic(page
);
1779 s
= (struct btrfs_super_block
*)mapped_buffer
;
1780 memcpy(on_disk_csum
, s
->csum
, sctx
->csum_size
);
1782 if (sblock
->pagev
[0]->logical
!= le64_to_cpu(s
->bytenr
))
1785 if (sblock
->pagev
[0]->generation
!= le64_to_cpu(s
->generation
))
1788 if (memcmp(s
->fsid
, fs_info
->fsid
, BTRFS_UUID_SIZE
))
1791 len
= BTRFS_SUPER_INFO_SIZE
- BTRFS_CSUM_SIZE
;
1792 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1793 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1796 u64 l
= min_t(u64
, len
, mapped_size
);
1798 crc
= btrfs_csum_data(root
, p
, crc
, l
);
1799 kunmap_atomic(mapped_buffer
);
1804 BUG_ON(index
>= sblock
->page_count
);
1805 BUG_ON(!sblock
->pagev
[index
]->page
);
1806 page
= sblock
->pagev
[index
]->page
;
1807 mapped_buffer
= kmap_atomic(page
);
1808 mapped_size
= PAGE_SIZE
;
1812 btrfs_csum_final(crc
, calculated_csum
);
1813 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1816 if (fail_cor
+ fail_gen
) {
1818 * if we find an error in a super block, we just report it.
1819 * They will get written with the next transaction commit
1822 spin_lock(&sctx
->stat_lock
);
1823 ++sctx
->stat
.super_errors
;
1824 spin_unlock(&sctx
->stat_lock
);
1826 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1827 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1829 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1830 BTRFS_DEV_STAT_GENERATION_ERRS
);
1833 return fail_cor
+ fail_gen
;
1836 static void scrub_block_get(struct scrub_block
*sblock
)
1838 atomic_inc(&sblock
->ref_count
);
1841 static void scrub_block_put(struct scrub_block
*sblock
)
1843 if (atomic_dec_and_test(&sblock
->ref_count
)) {
1846 for (i
= 0; i
< sblock
->page_count
; i
++)
1847 scrub_page_put(sblock
->pagev
[i
]);
1852 static void scrub_page_get(struct scrub_page
*spage
)
1854 atomic_inc(&spage
->ref_count
);
1857 static void scrub_page_put(struct scrub_page
*spage
)
1859 if (atomic_dec_and_test(&spage
->ref_count
)) {
1861 __free_page(spage
->page
);
1866 static void scrub_submit(struct scrub_ctx
*sctx
)
1868 struct scrub_bio
*sbio
;
1870 if (sctx
->curr
== -1)
1873 sbio
= sctx
->bios
[sctx
->curr
];
1875 scrub_pending_bio_inc(sctx
);
1877 if (!sbio
->bio
->bi_bdev
) {
1879 * this case should not happen. If btrfs_map_block() is
1880 * wrong, it could happen for dev-replace operations on
1881 * missing devices when no mirrors are available, but in
1882 * this case it should already fail the mount.
1883 * This case is handled correctly (but _very_ slowly).
1885 printk_ratelimited(KERN_WARNING
1886 "btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
1887 bio_endio(sbio
->bio
, -EIO
);
1889 btrfsic_submit_bio(READ
, sbio
->bio
);
1893 static int scrub_add_page_to_rd_bio(struct scrub_ctx
*sctx
,
1894 struct scrub_page
*spage
)
1896 struct scrub_block
*sblock
= spage
->sblock
;
1897 struct scrub_bio
*sbio
;
1902 * grab a fresh bio or wait for one to become available
1904 while (sctx
->curr
== -1) {
1905 spin_lock(&sctx
->list_lock
);
1906 sctx
->curr
= sctx
->first_free
;
1907 if (sctx
->curr
!= -1) {
1908 sctx
->first_free
= sctx
->bios
[sctx
->curr
]->next_free
;
1909 sctx
->bios
[sctx
->curr
]->next_free
= -1;
1910 sctx
->bios
[sctx
->curr
]->page_count
= 0;
1911 spin_unlock(&sctx
->list_lock
);
1913 spin_unlock(&sctx
->list_lock
);
1914 wait_event(sctx
->list_wait
, sctx
->first_free
!= -1);
1917 sbio
= sctx
->bios
[sctx
->curr
];
1918 if (sbio
->page_count
== 0) {
1921 sbio
->physical
= spage
->physical
;
1922 sbio
->logical
= spage
->logical
;
1923 sbio
->dev
= spage
->dev
;
1926 bio
= bio_alloc(GFP_NOFS
, sctx
->pages_per_rd_bio
);
1932 bio
->bi_private
= sbio
;
1933 bio
->bi_end_io
= scrub_bio_end_io
;
1934 bio
->bi_bdev
= sbio
->dev
->bdev
;
1935 bio
->bi_sector
= sbio
->physical
>> 9;
1937 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1939 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1941 sbio
->dev
!= spage
->dev
) {
1946 sbio
->pagev
[sbio
->page_count
] = spage
;
1947 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1948 if (ret
!= PAGE_SIZE
) {
1949 if (sbio
->page_count
< 1) {
1958 scrub_block_get(sblock
); /* one for the page added to the bio */
1959 atomic_inc(&sblock
->outstanding_pages
);
1961 if (sbio
->page_count
== sctx
->pages_per_rd_bio
)
1967 static int scrub_pages(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
1968 u64 physical
, struct btrfs_device
*dev
, u64 flags
,
1969 u64 gen
, int mirror_num
, u8
*csum
, int force
,
1970 u64 physical_for_dev_replace
)
1972 struct scrub_block
*sblock
;
1975 sblock
= kzalloc(sizeof(*sblock
), GFP_NOFS
);
1977 spin_lock(&sctx
->stat_lock
);
1978 sctx
->stat
.malloc_errors
++;
1979 spin_unlock(&sctx
->stat_lock
);
1983 /* one ref inside this function, plus one for each page added to
1985 atomic_set(&sblock
->ref_count
, 1);
1986 sblock
->sctx
= sctx
;
1987 sblock
->no_io_error_seen
= 1;
1989 for (index
= 0; len
> 0; index
++) {
1990 struct scrub_page
*spage
;
1991 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1993 spage
= kzalloc(sizeof(*spage
), GFP_NOFS
);
1996 spin_lock(&sctx
->stat_lock
);
1997 sctx
->stat
.malloc_errors
++;
1998 spin_unlock(&sctx
->stat_lock
);
1999 scrub_block_put(sblock
);
2002 BUG_ON(index
>= SCRUB_MAX_PAGES_PER_BLOCK
);
2003 scrub_page_get(spage
);
2004 sblock
->pagev
[index
] = spage
;
2005 spage
->sblock
= sblock
;
2007 spage
->flags
= flags
;
2008 spage
->generation
= gen
;
2009 spage
->logical
= logical
;
2010 spage
->physical
= physical
;
2011 spage
->physical_for_dev_replace
= physical_for_dev_replace
;
2012 spage
->mirror_num
= mirror_num
;
2014 spage
->have_csum
= 1;
2015 memcpy(spage
->csum
, csum
, sctx
->csum_size
);
2017 spage
->have_csum
= 0;
2019 sblock
->page_count
++;
2020 spage
->page
= alloc_page(GFP_NOFS
);
2026 physical_for_dev_replace
+= l
;
2029 WARN_ON(sblock
->page_count
== 0);
2030 for (index
= 0; index
< sblock
->page_count
; index
++) {
2031 struct scrub_page
*spage
= sblock
->pagev
[index
];
2034 ret
= scrub_add_page_to_rd_bio(sctx
, spage
);
2036 scrub_block_put(sblock
);
2044 /* last one frees, either here or in bio completion for last page */
2045 scrub_block_put(sblock
);
2049 static void scrub_bio_end_io(struct bio
*bio
, int err
)
2051 struct scrub_bio
*sbio
= bio
->bi_private
;
2052 struct btrfs_fs_info
*fs_info
= sbio
->dev
->dev_root
->fs_info
;
2057 btrfs_queue_worker(&fs_info
->scrub_workers
, &sbio
->work
);
2060 static void scrub_bio_end_io_worker(struct btrfs_work
*work
)
2062 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
2063 struct scrub_ctx
*sctx
= sbio
->sctx
;
2066 BUG_ON(sbio
->page_count
> SCRUB_PAGES_PER_RD_BIO
);
2068 for (i
= 0; i
< sbio
->page_count
; i
++) {
2069 struct scrub_page
*spage
= sbio
->pagev
[i
];
2071 spage
->io_error
= 1;
2072 spage
->sblock
->no_io_error_seen
= 0;
2076 /* now complete the scrub_block items that have all pages completed */
2077 for (i
= 0; i
< sbio
->page_count
; i
++) {
2078 struct scrub_page
*spage
= sbio
->pagev
[i
];
2079 struct scrub_block
*sblock
= spage
->sblock
;
2081 if (atomic_dec_and_test(&sblock
->outstanding_pages
))
2082 scrub_block_complete(sblock
);
2083 scrub_block_put(sblock
);
2088 spin_lock(&sctx
->list_lock
);
2089 sbio
->next_free
= sctx
->first_free
;
2090 sctx
->first_free
= sbio
->index
;
2091 spin_unlock(&sctx
->list_lock
);
2093 if (sctx
->is_dev_replace
&&
2094 atomic_read(&sctx
->wr_ctx
.flush_all_writes
)) {
2095 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
2096 scrub_wr_submit(sctx
);
2097 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
2100 scrub_pending_bio_dec(sctx
);
2103 static void scrub_block_complete(struct scrub_block
*sblock
)
2105 if (!sblock
->no_io_error_seen
) {
2106 scrub_handle_errored_block(sblock
);
2109 * if has checksum error, write via repair mechanism in
2110 * dev replace case, otherwise write here in dev replace
2113 if (!scrub_checksum(sblock
) && sblock
->sctx
->is_dev_replace
)
2114 scrub_write_block_to_dev_replace(sblock
);
2118 static int scrub_find_csum(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
2121 struct btrfs_ordered_sum
*sum
= NULL
;
2124 unsigned long num_sectors
;
2126 while (!list_empty(&sctx
->csum_list
)) {
2127 sum
= list_first_entry(&sctx
->csum_list
,
2128 struct btrfs_ordered_sum
, list
);
2129 if (sum
->bytenr
> logical
)
2131 if (sum
->bytenr
+ sum
->len
> logical
)
2134 ++sctx
->stat
.csum_discards
;
2135 list_del(&sum
->list
);
2142 num_sectors
= sum
->len
/ sctx
->sectorsize
;
2143 for (i
= 0; i
< num_sectors
; ++i
) {
2144 if (sum
->sums
[i
].bytenr
== logical
) {
2145 memcpy(csum
, &sum
->sums
[i
].sum
, sctx
->csum_size
);
2150 if (ret
&& i
== num_sectors
- 1) {
2151 list_del(&sum
->list
);
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		WARN_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
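
/*
 * Scrub one stripe of a chunk on @scrub_dev. @num is the index of the
 * stripe that lives on this device; @base and @length describe the chunk.
 * The extent tree (commit root) is walked one stripe-sized slice at a
 * time, the data checksums are collected in advance and every extent that
 * is found is handed to scrub_extent().
 */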
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}
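
	/*
	 * At this point offset is the start of the first stripe of this
	 * device within the chunk, increment is the logical distance
	 * between two consecutive stripes stored on this device, and
	 * mirror_num selects which copy this device holds (1-based, only
	 * meaningful for RAID1/RAID10/DUP).
	 */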
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for extent tree and csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
	blk_start_plug(&plug);
	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
					   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sctx->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			extent_logical = key.objectid;
			extent_physical = key.objectid - logical + physical;
			extent_len = key.offset;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);
			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   key.objectid - logical + physical);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
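
/*
 * Scrub the part of the chunk at @chunk_offset that is stored on
 * @scrub_dev: look up the chunk mapping and scrub the one stripe whose
 * physical start on this device matches @dev_offset.
 */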
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
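
/*
 * Walk all dev extents of @scrub_dev between @start and @end in the device
 * tree and scrub the corresponding chunks one by one. After each chunk all
 * outstanding read and write bios are flushed and pending transaction
 * commits are allowed to make progress before the next chunk is started.
 */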
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		slot = path->slots[0];
		l = path->nodes[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);

		mutex_lock(&fs_info->scrub_lock);
		while (atomic_read(&fs_info->scrub_pause_req)) {
			mutex_unlock(&fs_info->scrub_lock);
			wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
			mutex_lock(&fs_info->scrub_lock);
		}
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
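
/*
 * Scrub all superblock copies of @scrub_dev that lie within the device
 * size, using the generation of the last committed transaction for the
 * verification.
 */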
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_root *root = sctx->dev_root;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
					   &fs_info->generic_worker);
		else
			btrfs_init_workers(&fs_info->scrub_workers, "scrub",
					   fs_info->thread_pool_size,
					   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
				   "scrubwrc",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_wr_completion_workers.idle_thresh = 2;
		ret = btrfs_start_workers(
				&fs_info->scrub_wr_completion_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
				   &fs_info->generic_worker);
		ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_stop_workers(&fs_info->scrub_workers);
		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}
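
/*
 * Entry point for both a plain scrub and the scrub part of dev-replace.
 * Validates the size assumptions the scrub code relies on, takes a worker
 * reference, looks up the device and then scrubs the superblocks (plain
 * scrub only) and all chunks in the range [start, end]. Only one scrub per
 * device may run at a time.
 */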
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       fs_info->chunk_root->leafsize);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
		       fs_info->chunk_root->sectorsize,
		       (unsigned long long)PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		return ret;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);
	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	if (!is_dev_replace) {
		down_read(&fs_info->scrub_super_lock);
		ret = scrub_supers(sctx, dev);
		up_read(&fs_info->scrub_super_lock);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);
	scrub_workers_put(fs_info);

	return ret;
}
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(fs_info, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
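
/*
 * In the dev-replace case, reads are issued against a readable mirror
 * instead of the device being replaced. This helper maps the extent to the
 * first stripe returned by btrfs_map_block() and reports the device,
 * physical offset and mirror number to read from.
 */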
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
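
/*
 * Data without checksums (nocow/nodatasum ranges) cannot be verified by
 * the scrub read path. In the dev-replace case such ranges are instead
 * copied through the page cache: copy_nocow_pages() queues a worker that
 * resolves the logical address to the inodes referencing it and writes the
 * affected pages to the replace target device.
 */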
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	nocow_ctx->work.func = copy_nocow_pages_worker;
	btrfs_queue_worker(&fs_info->scrub_nocow_workers,
			   &nocow_ctx->work);

	return 0;
}
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  copy_nocow_pages_for_inode,
					  nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %llu, ret %d\n",
			(unsigned long long)logical,
			(unsigned long long)physical_for_dev_replace,
			(unsigned long long)len,
			(unsigned long long)mirror_num, ret);
		not_written = 1;
		goto out;
	}

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}
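
/*
 * Called once per inode that references the nodatasum extent. Reads the
 * affected range through the page cache of that inode and writes each page
 * to the corresponding physical offset on the dev-replace target.
 */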
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	unsigned long index;
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	int ret = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_root *local_root;
	u64 physical_for_dev_replace;
	u64 len;
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	len = nocow_ctx->len;
	while (len >= PAGE_CACHE_SIZE) {
		struct page *page = NULL;
		int ret_sub;

		index = offset >> PAGE_CACHE_SHIFT;

		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			pr_err("find_or_create_page() failed\n");
			ret = -ENOMEM;
			goto next_page;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			ret_sub = extent_read_full_page(&BTRFS_I(inode)->
							io_tree,
							page, btrfs_get_extent,
							nocow_ctx->mirror_num);
			if (ret_sub) {
				ret = ret_sub;
				goto next_page;
			}
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}
		ret_sub = write_page_nocow(nocow_ctx->sctx,
					   physical_for_dev_replace, page);
		if (ret_sub) {
			ret = ret_sub;
			goto next_page;
		}

next_page:
		if (page) {
			unlock_page(page);
			page_cache_release(page);
		}
		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}

	if (inode)
		iput(inode);
	return ret;
}
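
/*
 * Synchronously write one page to @physical_for_dev_replace on the
 * dev-replace target device, bypassing checksumming. Used only for data
 * without checksums that cannot go through the normal scrub write path.
 */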
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;
	DECLARE_COMPLETION_ONSTACK(compl);

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_private = &compl;
	bio->bi_end_io = scrub_complete_bio_end_io;
	bio->bi_size = 0;
	bio->bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}
	btrfsic_submit_bio(WRITE_SYNC, bio);
	wait_for_completion(&compl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		goto leave_with_eio;