/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * available.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
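/*
 * Arithmetic behind the comments above (assuming 4k pages): 32 pages * 4k
 * = 128k per bio, and 64 bios * 128k = 8MB of I/O in flight per device.
 */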
/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};
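/*
 * Note for readers: a scrub_page is reference counted (see scrub_page_get()
 * and scrub_page_put() below); the last put frees the attached page, if
 * any, together with the struct itself.
 */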
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};
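/*
 * Implementation note: a scrub_block groups the pages that form one
 * verification unit (a data sector or a metadata node). The error bits
 * above are filled in by the recheck code and consumed by
 * scrub_handle_errored_block().
 */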
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};
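/*
 * Every write to the dev-replace target device is funneled through this
 * context; wr_lock serializes building and submitting the write bios.
 */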
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};
struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};
struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}
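/*
 * Note on the pause protocol implemented above: a scrubber that observes
 * fs_info->scrub_pause_req announces itself via scrubs_paused, drops
 * scrub_lock while it waits, and wakes scrub_pause_wait on both sides of
 * the wait so the pausing side can observe the state change.
 */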
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * the check of @scrubs_running == @scrubs_paused inside
	 * wait_event() is not an atomic operation, which means we may
	 * inc/dec @scrubs_running/@scrubs_paused at any time. Wake up
	 * @scrub_pause_wait as often as we can so that a blocked
	 * transaction commit waits as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
				NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
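/*
 * Note the error convention above: scrub_setup_ctx() never returns NULL;
 * callers get either a valid context or an ERR_PTR() value and must check
 * the result with IS_ERR(), not against NULL.
 */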
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			printk_in_rcu(KERN_WARNING
				"BTRFS: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
		WARN_ON(sctx->is_dev_replace);

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
				NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from. scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}

	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"BTRFS: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}
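/*
 * Note on the out: path above: the temporary blocks in sblocks_for_recheck
 * do not use ref_count/outstanding_pages (see scrub_setup_recheck_block()
 * below), which is why they are torn down manually there.
 */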
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
				      &mapped_length, &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}
/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_iter.bi_sector = page->physical >> 9;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (btrfsic_submit_bio_wait(READ, bio))
			sblock->no_io_error_seen = 0;

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);
}
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != btrfs_stack_header_generation(h)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			printk_ratelimited(KERN_WARNING "BTRFS: "
				"scrub_repair_page_from_good_copy(bdev == NULL) "
				"is unexpected!\n");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(WRITE, bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_NOFS);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS,
						 wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}
static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(WRITE, sbio->bio);
}
static void scrub_wr_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->dev_root->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		++fail;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	WARN_ON(sctx->nodesize != sctx->leafsize);
	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}
static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}
static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->ref_count);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->ref_count)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}
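/*
 * The get/put pairs above implement plain manual reference counting; a
 * page is additionally pinned for each bio it is added to (see the
 * callers of scrub_page_get()).
 */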
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);

	if (!sbio->bio->bi_bdev) {
		/*
		 * this case should not happen. If btrfs_map_block() is
		 * wrong, it could happen for dev-replace operations on
		 * missing devices when no mirrors are available, but in
		 * this case it should already fail the mount.
		 * This case is handled correctly (but _very_ slowly).
		 */
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
		bio_endio(sbio->bio, -EIO);
	} else {
		btrfsic_submit_bio(READ, sbio->bio);
	}
}
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS,
						 sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen) {
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev-replace case, a block with a checksum error
		 * is written to the target via the repair mechanism;
		 * otherwise it is written out here.
		 */
		if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}
}
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
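/*
 * Worked example for the lookup above (hypothetical numbers): with a 4k
 * sectorsize and a csum item whose bytenr covers 64k, a logical address of
 * bytenr + 20k yields index 5, i.e. the checksum of the sixth sector
 * within the item is copied out.
 */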
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		WARN_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the leftmost data
 * stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	int stripe_index;
	int rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = *offset;
		do_div(stripe_nr, map->stripe_len);
		do_div(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		rot = do_div(stripe_nr, map->num_stripes);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
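/*
 * Scrub one stripe of the chunk mapped by @map that lives on
 * @scrub_dev: compute the logical range covered by this stripe
 * (profile dependent), read ahead the extent and csum trees, then
 * walk all extent items in that range and feed them to scrub_extent().
 */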
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	nstripes = length;
	physical = map->stripes[num].physical;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
		get_raid56_logic_offset(physical, num, map, &offset);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Work on the commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite them
	 * to repair disk errors without any race conditions.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * Trigger readahead for the extent tree and the csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused so as not to hold off transaction commits.
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6)) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	/*
	 * Collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) amount to about 1 MB.
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/* for raid56, we skip parity stripe */
		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
			ret = get_raid56_logic_offset(physical, num,
						      map, &logical);
			logical += base;
			if (ret)
				goto skip;
		}
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			scrub_blocked_if_needed(fs_info);
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->leafsize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning "
					  "stripes, ignored. logical=%llu",
					  key.objectid, logical);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root, logical,
						logical + map->stripe_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);
			if (ret)
				goto out;

			scrub_free_csums(sctx);
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
						 BTRFS_BLOCK_GROUP_RAID6)) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
					do {
						physical += map->stripe_len;
						ret = get_raid56_logic_offset(
								physical, num,
								map, &logical);
						logical += base;
					} while (physical < physical_end && ret);
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
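/*
 * Scrub the stripes of the chunk at @chunk_offset that are backed by
 * the device extent of @scrub_dev at @dev_offset.
 */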
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
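/*
 * Iterate over all device extents of @scrub_dev between @start and
 * @end and scrub the corresponding chunk of each one. After every
 * chunk, all pending bios are flushed and the scrub briefly counts
 * itself as paused so that transaction commits are not held off.
 */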
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);

		/*
		 * This must be done before we decrease @scrubs_paused;
		 * make sure we don't block transaction commit while we are
		 * waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);

		mutex_lock(&fs_info->scrub_lock);
		__scrub_blocked_if_needed(fs_info);
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
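/*
 * Scrub all superblock mirrors that fit on @scrub_dev, using the
 * generation of the last committed transaction for verification.
 */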
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;
	int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers) {
			ret = -ENOMEM;
			goto out;
		}
	}
	++fs_info->scrub_workers_refcnt;
out:
	return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
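/*
 * Main entry point for scrub and for the scrub side of device replace:
 * after validating the size assumptions, it looks up the device,
 * starts the worker threads, sets up a scrub context and then scrubs
 * the superblocks (plain scrub only) followed by all chunks of the
 * device.
 */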
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize == leafsize (%d == %d) fails",
			  fs_info->chunk_root->nodesize,
			  fs_info->chunk_root->leafsize);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err(fs_info,
			  "scrub: size assumption sectorsize != PAGE_SIZE "
			  "(%d != %lu) fails",
			  fs_info->chunk_root->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
			  "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->chunk_root->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->chunk_root->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * checking @scrub_pause_req here, we can avoid
	 * race between committing transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);

	return ret;
}
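/*
 * Request that all running scrubs pause and wait until they have
 * reached the paused state, e.g. around a transaction commit.
 */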
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}
void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
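/*
 * Map @extent_logical through the chunk mapping and return the
 * physical location, device and mirror number of the first stripe of
 * the mapping; used by dev-replace to pick where the extent is read
 * from.
 */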
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}
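/*
 * Initialize the write context used to mirror scrubbed data to the
 * dev-replace target. For a plain scrub there is no target device,
 * so only the lock is set up.
 */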
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
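/*
 * Dev-replace path for data extents that carry no checksum
 * (nodatacow): such pages cannot be verified, so they are copied via
 * the page cache of the owning inodes by a dedicated worker instead
 * of the regular scrub read path.
 */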
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}
static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}
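/*
 * COPY_COMPLETE tells copy_nocow_pages_worker() that
 * copy_nocow_pages_for_inode() copied the whole range, so the
 * remaining recorded inodes do not need to be visited.
 */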
#define COPY_COMPLETE 1

static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
			   "phys %llu, len %llu, mir %u, ret %d",
			   logical, physical_for_dev_replace, len, mirror_num,
			   ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans, root);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}
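/*
 * Copy the nodatacow range from the page cache of one inode to the
 * replace target. The inode mutex and the extent range lock are held
 * to keep truncate, direct I/O and hole punching away while the pages
 * are read and written out.
 */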
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct btrfs_ordered_extent *ordered;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 len = nocow_ctx->len;
	u64 lockstart = offset, lockend = offset + len - 1;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > nocow_ctx->logical ||
	    em->block_start + em->block_len < nocow_ctx->logical + len) {
		free_extent_map(em);
		goto out_unlock;
	}
	free_extent_map(em);

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page_nolock(io_tree, page,
							   btrfs_get_extent,
							   nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless, because it may be
			 * an old one, and the new data may have been written
			 * into a new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}
		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}
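/*
 * Synchronously write a single page to the replace target at
 * @physical_for_dev_replace, bypassing the batched scrub bios.
 */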
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;