/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) copies of each chunk, and each is on
 * a different drive.
 * near_copies and far_copies must be at least one, and their product is at
 * most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 */
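
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * assuming raid_disks=4, near_copies=2, far_copies=2, chunks A,B,C,D,...
 * land roughly as
 *
 *	dev0	dev1	dev2	dev3
 *	 A	 A	 B	 B	<- first (near) section
 *	 C	 C	 D	 D
 *	...
 *	 B	 B	 A	 A	<- far section, start shifted by
 *	 D	 D	 C	 C	   near_copies devices
 *
 * so every chunk exists near_copies*far_copies = 4 times, each copy on a
 * different drive.
 */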

/* Number of guaranteed r10bios in case of extreme VM load: */
#define NR_RAID10_BIOS 256

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
	int size = offsetof(struct r10bio_s, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);

static void r10bio_pool_free(void *r10_bio, void *data)

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
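
/*
 * For reference (editor's note, assuming 4K pages): RESYNC_BLOCK_SIZE is
 * 64K, so RESYNC_PAGES is 16, RESYNC_WINDOW reserves 1MB (16 buffers) for
 * the resync mempool, and RESYNC_DEPTH permits 32MB/64K = 512 concurrent
 * resync requests, memory permitting.
 */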

/* When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf) */

static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
		nalloc = 2; /* recovery */

	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		r10_bio->devs[j].bio = bio;

	/* Allocate RESYNC_PAGES data pages and attach them */
	for (j = 0 ; j < nalloc; j++) {
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
						&conf->mddev->recovery)) {
				/* we can share bv_page's during recovery */
				struct bio *rbio = r10_bio->devs[0].bio;
				page = rbio->bi_io_vec[i].bv_page;
				page = alloc_page(gfp_flags);

			bio->bi_io_vec[i].bv_page = page;

	safe_put_page(bio->bi_io_vec[i-1].bv_page);
	for (i = 0; i < RESYNC_PAGES; i++)
		safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	while ( ++j < nalloc )
		bio_put(r10_bio->devs[j].bio);
	r10bio_pool_free(r10_bio, conf);

static void r10buf_pool_free(void *__r10_bio, void *data)

	r10bio_t *r10bio = __r10_bio;

	for (j = 0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;

		for (i = 0; i < RESYNC_PAGES; i++) {
			safe_put_page(bio->bi_io_vec[i].bv_page);
			bio->bi_io_vec[i].bv_page = NULL;

	r10bio_pool_free(r10bio, conf);

static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = & r10_bio->devs[i].bio;
		if (*bio && *bio != IO_BLOCKED)

static void free_r10bio(r10bio_t *r10_bio)
	conf_t *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);

static void put_buf(r10bio_t *r10_bio)
	conf_t *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

static void reschedule_retry(r10bio_t *r10_bio)
	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);

/* raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layers. */
static void raid_end_bio_io(r10bio_t *r10_bio)
	struct bio *bio = r10_bio->master_bio;
	conf_t *conf = r10_bio->mddev->private;

	if (bio->bi_phys_segments) {
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	/* Wake up any possible resync thread that waits for the device */
	free_r10bio(r10_bio);

/* Update disk head position estimator based on IRQ completion info. */
static inline void update_head_pos(int slot, r10bio_t *r10_bio)
	conf_t *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);

/* Find the disk number which triggered given bio */
static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, struct bio *bio)
	for (slot = 0; slot < conf->copies; slot++)
		if (r10_bio->devs[slot].bio == bio)

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	return r10_bio->devs[slot].devnum;

static void raid10_end_read_request(struct bio *bio, int error)
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;
	conf_t *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	/* this branch is our 'one mirror IO has finished' event handler: */
	update_head_pos(slot, r10_bio);
		/* Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio. */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
		/* oops, read error - keep the refcount on the rdev */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(KERN_ERR
				   "md/raid10:%s: %s: rescheduling sector %llu\n",
				   bdevname(conf->mirrors[dev].rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);

static void raid10_end_write_request(struct bio *bio, int error)
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;
	conf_t *conf = r10_bio->mddev->private;

	dev = find_bio_disk(conf, r10_bio, bio);
	/* this branch is our 'one mirror IO has finished' event handler: */
		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
		/* an I/O failed, we can't clear the bitmap */
		set_bit(R10BIO_Degraded, &r10_bio->state);
		/* Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio. */
		set_bit(R10BIO_Uptodate, &r10_bio->state);

	/* Let's see if all mirrored write operations have finished */
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
				!test_bit(R10BIO_Degraded, &r10_bio->state),
		md_write_end(r10_bio->mddev);
		raid_end_bio_io(r10_bio);

	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the one device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> conf->chunk_shift;
	sector = r10bio->sector & conf->chunk_mask;

	chunk *= conf->near_copies;
	dev = sector_div(stripe, conf->raid_disks);
	if (conf->far_offset)
		stripe *= conf->far_copies;

	sector += stripe << conf->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < conf->near_copies; n++) {
		r10bio->devs[slot].addr = sector;
		r10bio->devs[slot].devnum = d;

		for (f = 1; f < conf->far_copies; f++) {
			d += conf->near_copies;
			if (d >= conf->raid_disks)
				d -= conf->raid_disks;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;

		if (dev >= conf->raid_disks) {
			sector += (conf->chunk_mask + 1);
	BUG_ON(slot != conf->copies);

static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
	sector_t offset, chunk, vchunk;

	offset = sector & conf->chunk_mask;
	if (conf->far_offset) {
		chunk = sector >> conf->chunk_shift;
		fc = sector_div(chunk, conf->far_copies);
		dev -= fc * conf->near_copies;
			dev += conf->raid_disks;
		while (sector >= conf->stride) {
			sector -= conf->stride;
			if (dev < conf->near_copies)
				dev += conf->raid_disks - conf->near_copies;
				dev -= conf->near_copies;
		chunk = sector >> conf->chunk_shift;
	vchunk = chunk * conf->raid_disks + dev;
	sector_div(vchunk, conf->near_copies);
	return (vchunk << conf->chunk_shift) + offset;
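
/*
 * Worked example (editor's sketch, assuming raid_disks=4, near_copies=2,
 * far_copies=1 and 64K chunks, i.e. chunk_mask=127, chunk_shift=7):
 * virtual sector 300 is chunk 2, offset 44; chunk*near_copies = 4, and
 * sector_div(4, 4) gives stripe 1, starting device 0, so raid10_find_phys()
 * places the two copies at device sector 128+44 = 172 on dev0 and dev1.
 * raid10_find_virt(conf, 172, 0) runs the inverse arithmetic:
 * vchunk = (172>>7)*4 + 0 = 4, divided by near_copies gives 2, and
 * (2<<7) + 44 = 300 again.
 */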

/* raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 * @bvm: properties of new bio
 * @biovec: the request that could be merged to it.
 *
 * Return amount of bytes we can accept at this offset
 * If near_copies == raid_disks, there are no striping issues,
 * but in that case, the function isn't called at all. */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 *
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
	const sector_t this_sector = r10_bio->sector;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;

	raid10_find_phys(conf, r10_bio);
	sectors = r10_bio->sectors;
	best_dist = MaxSector;
	best_good_sectors = 0;

	/* Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window. */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == IO_BLOCKED)
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (!test_bit(In_sync, &rdev->flags))
		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
			if (first_bad <= dev_sector) {
				/* Cannot read here. If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device. */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
				/* Must read from here */
			best_good_sectors = sectors;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays. So only
		 * keep it for 'near' arrays, and review those later. */
		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))

		/* for far > 1 always use the lowest address */
		if (conf->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;

	if (slot == conf->copies)

		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending */
			rdev_dec_pending(rdev, conf->mddev);
		r10_bio->read_slot = slot;

	*max_sectors = best_good_sectors;

static int raid10_congested(void *data, int bits)
	mddev_t *mddev = data;
	conf_t *conf = mddev->private;

	if (mddev_congested(mddev, bits))
	for (i = 0; i < conf->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);

static void flush_pending_writes(conf_t *conf)
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here. */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			generic_make_request(bio);
		spin_unlock_irq(&conf->device_lock);

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
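
/*
 * Rough usage sketch (editor's illustration, not from the original source):
 *
 *	regular IO			resync/recovery IO
 *	wait_barrier(conf);		raise_barrier(conf, force);
 *	...submit normal IO...		...submit background IO...
 *	allow_barrier(conf);		lower_barrier(conf);
 *
 * freeze_array()/unfreeze_array() additionally quiesce both kinds of IO
 * while a failed request is being handled.
 */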

static void raise_barrier(conf_t *conf, int force)
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock, );

	/* block any new IO from starting */

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock, );

	spin_unlock_irq(&conf->resync_lock);

static void lower_barrier(conf_t *conf)
	spin_lock_irqsave(&conf->resync_lock, flags);
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);

static void wait_barrier(conf_t *conf)
	spin_lock_irq(&conf->resync_lock);
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
	spin_unlock_irq(&conf->resync_lock);

static void allow_barrier(conf_t *conf)
	spin_lock_irqsave(&conf->resync_lock, flags);
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);

static void freeze_array(conf_t *conf)
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue. */
	spin_lock_irq(&conf->resync_lock);
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);

static void unfreeze_array(conf_t *conf)
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);

static int make_request(mddev_t *mddev, struct bio * bio)
	conf_t *conf = mddev->private;
	mirror_info_t *mirror;
	struct bio *read_bio;
	int chunk_sects = conf->chunk_mask + 1;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
	mdk_rdev_t *blocked_rdev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);

	/* If this request crosses a chunk boundary, we need to
	 * split it. This will only happen for 1 PAGE (or less) requests. */
	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
		      conf->near_copies < conf->raid_disks )) {
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it. */
			   chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );

		/* Each of these 'make_request' calls will call 'wait_barrier'.
		 * If the first succeeds but the second blocks due to the resync
		 * thread raising the barrier, we will deadlock because the
		 * IO to the underlying device will be queued in generic_make_request
		 * and will never complete, so will never reduce nr_pending.
		 * So increment nr_waiting here so no new raise_barriers will
		 * succeed, and so the second wait_barrier cannot block. */
		spin_lock_irq(&conf->resync_lock);
		spin_unlock_irq(&conf->resync_lock);

		if (make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);

		spin_lock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		spin_unlock_irq(&conf->resync_lock);

		bio_pair_release(bp);
	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	md_write_start(mddev, bio);

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = bio->bi_size >> 9;
	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r10_bio and no locking
	 * will be needed when the request completes. If it is
	 * non-zero, then it is the number of not-completed requests. */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	/* read balancing logic: */
		disk = read_balance(conf, r10_bio, &max_sectors);
		slot = r10_bio->read_slot;
			raid_end_bio_io(r10_bio);
		mirror = conf->mirrors + disk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,

		r10_bio->devs[slot].bio = read_bio;

		read_bio->bi_sector = r10_bio->devs[slot].addr +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r10_bio;

		if (max_sectors < r10_bio->sectors) {
			/* Could not read all from this device, so we will
			 * need another r10_bio. */
			sectors_handled = (r10_bio->sectors + max_sectors
			r10_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
				bio->bi_phys_segments++;
			spin_unlock(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __generic_make_request
			 * and subsequent mempool_alloc might block
			 * waiting for it. so hand bio over to raid10d. */
			reschedule_retry(r10_bio);

			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

			r10_bio->master_bio = bio;
			r10_bio->sectors = ((bio->bi_size >> 9)
			r10_bio->mddev = mddev;
			r10_bio->sector = bio->bi_sector + sectors_handled;
			generic_make_request(read_bio);

	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev. Record them by setting */
	plugged = mddev_check_plugged(mddev);

	raid10_find_phys(conf, r10_bio);
	for (i = 0;  i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			r10_bio->devs[i].bio = bio;
			r10_bio->devs[i].bio = NULL;
			set_bit(R10BIO_Degraded, &r10_bio->state);

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		for (j = 0; j < i; j++)
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);

	atomic_set(&r10_bio->remaining, 1);
	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		if (!r10_bio->devs[i].bio)

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		r10_bio->devs[i].bio = mbio;

		mbio->bi_sector	= r10_bio->devs[i].addr +
			conf->mirrors[d].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		mbio->bi_end_io	= raid10_end_write_request;
		mbio->bi_rw = WRITE | do_sync | do_fua;
		mbio->bi_private = r10_bio;

		atomic_inc(&r10_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		spin_unlock_irqrestore(&conf->device_lock, flags);

	if (atomic_dec_and_test(&r10_bio->remaining)) {
		/* This matches the end of raid10_end_write_request() */
		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
				!test_bit(R10BIO_Degraded, &r10_bio->state),
		md_write_end(mddev);
		raid_end_bio_io(r10_bio);

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync || !mddev->bitmap || !plugged)
		md_wakeup_thread(mddev->thread);

static void status(struct seq_file *seq, mddev_t *mddev)
	conf_t *conf = mddev->private;

	if (conf->near_copies < conf->raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->near_copies);
	if (conf->far_copies > 1) {
		if (conf->far_offset)
			seq_printf(seq, " %d offset-copies", conf->far_copies);
			seq_printf(seq, " %d far-copies", conf->far_copies);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->mirrors[i].rdev &&
			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");

/* check if there are enough drives for
 * every block to appear on at least one.
 * Don't consider the device numbered 'ignore'
 * as we might be about to remove it. */
static int enough(conf_t *conf, int ignore)
	int n = conf->copies;

		if (conf->mirrors[first].rdev &&
		first = (first+1) % conf->raid_disks;
	} while (first != 0);

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && !enough(conf, rdev->raid_disk))
		/* Don't fail the drive, just return an IO error. */
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/* if recovery is running, make sure it aborts. */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
	       "md/raid10:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);

static void print_conf(conf_t *conf)
	printk(KERN_DEBUG "RAID10 conf printout:\n");
		printk(KERN_DEBUG "(!conf)\n");
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &tmp->rdev->flags),
			       !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));

static void close_sync(conf_t *conf)
	allow_barrier(conf);

	mempool_destroy(conf->r10buf_pool);
	conf->r10buf_pool = NULL;

static int raid10_spare_active(mddev_t *mddev)
	conf_t *conf = mddev->private;
	unsigned long flags;

	/*
	 * Find all non-in_sync disks within the RAID10 configuration
	 * and mark them in_sync
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->mirrors + i;
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			sysfs_notify_dirent(tmp->rdev->sysfs_state);
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
	conf_t *conf = mddev->private;
	int last = conf->raid_disks - 1;

	if (rdev->badblocks.count)
	if (mddev->recovery_cp < MaxSector)
		/* only hot-add to in-sync arrays, as recovery is
		 * very different from resync */
	if (!enough(conf, -1))

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		mirror = rdev->saved_raid_disk;

	for ( ; mirror <= last ; mirror++) {
		mirror_info_t *p = &conf->mirrors[mirror];
		if (p->recovery_disabled == mddev->recovery_disabled)

		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must
		 * never risk violating it, so limit
		 * ->max_segments to one lying within a single
		 * page, as a one page request is never in
		 * violation. */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);

		p->head_position = 0;
		rdev->raid_disk = mirror;
		if (rdev->saved_raid_disk != mirror)
		rcu_assign_pointer(p->rdev, rdev);

	md_integrity_add_rdev(rdev, mddev);

static int raid10_remove_disk(mddev_t *mddev, int number)
	conf_t *conf = mddev->private;
	mirror_info_t *p = conf->mirrors + number;

		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
		/* Only remove faulty devices in recovery */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != p->recovery_disabled &&
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
	err = md_integrity_register(mddev);

static void end_sync_read(struct bio *bio, int error)
	r10bio_t *r10_bio = bio->bi_private;
	conf_t *conf = r10_bio->mddev->private;

	d = find_bio_disk(conf, r10_bio, bio);

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R10BIO_Uptodate, &r10_bio->state);
		atomic_add(r10_bio->sectors,
			   &conf->mirrors[d].rdev->corrected_errors);
		if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
			md_error(r10_bio->mddev,
				 conf->mirrors[d].rdev);

	/* for reconstruct, we always reschedule after a read.
	 * for resync, only after all reads */
	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
	    atomic_dec_and_test(&r10_bio->remaining)) {
		/* we have read all the blocks,
		 * do the comparison in process context in raid10d */
		reschedule_retry(r10_bio);

static void end_sync_write(struct bio *bio, int error)
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;
	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;

	d = find_bio_disk(conf, r10_bio, bio);
		md_error(mddev, conf->mirrors[d].rdev);

	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
	while (atomic_dec_and_test(&r10_bio->remaining)) {
		if (r10_bio->master_bio == NULL) {
			/* the primary of several recovery bios */
			sector_t s = r10_bio->sectors;
			md_done_sync(mddev, s, 1);
			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;

/*
 * Note: sync and recover are handled very differently for raid10
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write. The lowest numbered
 * drive is authoritative.
 * However requests come in for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
 *
 * We check if all blocks are in-sync and only write to blocks that
 * are not in sync.
 */
static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
	conf_t *conf = mddev->private;
	struct bio *tbio, *fbio;

	atomic_set(&r10_bio->remaining, 1);

	/* find the first device with a block */
	for (i = 0; i < conf->copies; i++)
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))

	if (i == conf->copies)

	fbio = r10_bio->devs[i].bio;

	/* now find blocks with errors */
	for (i = 0 ; i < conf->copies ; i++) {
		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);

		tbio = r10_bio->devs[i].bio;
		if (tbio->bi_end_io != end_sync_read)
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE; */
			for (j = 0; j < vcnt; j++)
				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
					   page_address(tbio->bi_io_vec[j].bv_page),
			mddev->resync_mismatches += r10_bio->sectors;
			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				/* Don't fix anything. */
		/* Ok, we need to write this bio
		 * First we need to fixup bv_offset, bv_len and
		 * bi_vecs, as the read request might have corrupted these */
		tbio->bi_vcnt = vcnt;
		tbio->bi_size = r10_bio->sectors << 9;
		tbio->bi_phys_segments = 0;
		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		tbio->bi_flags |= 1 << BIO_UPTODATE;
		tbio->bi_next = NULL;
		tbio->bi_rw = WRITE;
		tbio->bi_private = r10_bio;
		tbio->bi_sector = r10_bio->devs[i].addr;

		for (j = 0; j < vcnt ; j++) {
			tbio->bi_io_vec[j].bv_offset = 0;
			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;

			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
			       page_address(fbio->bi_io_vec[j].bv_page),
		tbio->bi_end_io = end_sync_write;

		d = r10_bio->devs[i].devnum;
		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
		atomic_inc(&r10_bio->remaining);
		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);

		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		generic_make_request(tbio);

	if (atomic_dec_and_test(&r10_bio->remaining)) {
		md_done_sync(mddev, r10_bio->sectors, 1);

/*
 * Now for the recovery code.
 * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choose a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use. The first for reading,
 * the second for writing.
 */
static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
	conf_t *conf = mddev->private;

	/*
	 * share the pages with the first bio
	 * and submit the write request
	 */
	wbio = r10_bio->devs[1].bio;
	d = r10_bio->devs[1].devnum;

	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
		generic_make_request(wbio);
		       "md/raid10:%s: recovery aborted due to read error\n",
		conf->mirrors[d].recovery_disabled = mddev->recovery_disabled;
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);

/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 */
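
/*
 * Example (editor's note): if the last recorded read error was three hours
 * ago, read_errors is shifted right by 3, i.e. roughly divided by 8, before
 * the new error is counted; after 8*sizeof(read_errors) = 32 error-free
 * hours the count is simply reset to 0 to avoid an over-long shift.
 */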
static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
	struct timespec cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	ktime_get_ts(&cur_time_mon);

	if (rdev->last_read_error.tv_sec == 0 &&
	    rdev->last_read_error.tv_nsec == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;

	hours_since_last = (cur_time_mon.tv_sec -
			    rdev->last_read_error.tv_sec) / 3600;

	rdev->last_read_error = cur_time_mon;

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */
static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
	int sect = 0; /* Offset from r10_bio->sector */
	int sectors = r10_bio->sectors;
	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
	int d = r10_bio->devs[r10_bio->read_slot].devnum;

	/* still own a reference to this rdev, so it cannot
	 * have been cleared recently. */
	rdev = conf->mirrors[d].rdev;

	if (test_bit(Faulty, &rdev->flags))
		/* drive has already been failed, just ignore any
		   more fix_read_error() attempts */

	check_decay_read_errors(mddev, rdev);
	atomic_inc(&rdev->read_errors);
	if (atomic_read(&rdev->read_errors) > max_read_errors) {
		char b[BDEVNAME_SIZE];
		bdevname(rdev->bdev, b);
		       "md/raid10:%s: %s: Raid device exceeded "
		       "read_error threshold [cur %d:max %d]\n",
		       atomic_read(&rdev->read_errors), max_read_errors);
		       "md/raid10:%s: %s: Failing raid device\n",
		md_error(mddev, conf->mirrors[d].rdev);

		int sl = r10_bio->read_slot;
		if (s > (PAGE_SIZE>>9))

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			    test_bit(In_sync, &rdev->flags) &&
			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
					&first_bad, &bad_sectors) == 0) {
				atomic_inc(&rdev->nr_pending);
				success = sync_page_io(rdev,
						       r10_bio->devs[sl].addr +
						       conf->tmppage, READ, false);
				rdev_dec_pending(rdev, mddev);
			if (sl == conf->copies)
		} while (!success && sl != r10_bio->read_slot);

			/* Cannot read from anywhere -- bye bye array */
			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
			md_error(mddev, conf->mirrors[dn].rdev);

		/* write it back and re-read */
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			    !test_bit(In_sync, &rdev->flags))

			atomic_inc(&rdev->nr_pending);
			if (sync_page_io(rdev,
					 r10_bio->devs[sl].addr +
					 s<<9, conf->tmppage, WRITE, false)
				/* Well, this device is dead */
				       "md/raid10:%s: read correction "
				       " (%d sectors at %llu on %s)\n",
				       (unsigned long long)(
					       sect + rdev->data_offset),
				       bdevname(rdev->bdev, b));
				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
				       bdevname(rdev->bdev, b));
				md_error(mddev, rdev);
			rdev_dec_pending(rdev, mddev);

		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			    !test_bit(In_sync, &rdev->flags))

			atomic_inc(&rdev->nr_pending);
			if (sync_page_io(rdev,
					 r10_bio->devs[sl].addr +
					 s<<9, conf->tmppage,
					 READ, false) == 0) {
				/* Well, this device is dead */
				       "md/raid10:%s: unable to read back "
				       " (%d sectors at %llu on %s)\n",
				       (unsigned long long)(
					       sect + rdev->data_offset),
				       bdevname(rdev->bdev, b));
				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
				       bdevname(rdev->bdev, b));
				md_error(mddev, rdev);
				       "md/raid10:%s: read error corrected"
				       " (%d sectors at %llu on %s)\n",
				       (unsigned long long)(
					       sect + rdev->data_offset),
				       bdevname(rdev->bdev, b));
				atomic_add(s, &rdev->corrected_errors);
			rdev_dec_pending(rdev, mddev);

static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
	int slot = r10_bio->read_slot;
	int mirror = r10_bio->devs[slot].devnum;
	conf_t *conf = mddev->private;
	char b[BDEVNAME_SIZE];
	unsigned long do_sync;

	/* we got a read error. Maybe the drive is bad. Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices. When we find one, we re-write
	 * and check if that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen. */
	if (mddev->ro == 0) {
		fix_read_error(conf, mddev, r10_bio);
		unfreeze_array(conf);
	rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);

	bio = r10_bio->devs[slot].bio;
	bdevname(bio->bi_bdev, b);
	r10_bio->devs[slot].bio =
		mddev->ro ? IO_BLOCKED : NULL;
	mirror = read_balance(conf, r10_bio, &max_sectors);
		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       (unsigned long long)r10_bio->sector);
		raid_end_bio_io(r10_bio);

	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
	slot = r10_bio->read_slot;
	rdev = conf->mirrors[mirror].rdev;
			   "md/raid10:%s: %s: redirecting "
			   "sector %llu to another mirror\n",
			   bdevname(rdev->bdev, b),
			   (unsigned long long)r10_bio->sector);
	bio = bio_clone_mddev(r10_bio->master_bio,
		    r10_bio->sector - bio->bi_sector,
	r10_bio->devs[slot].bio = bio;
	bio->bi_sector = r10_bio->devs[slot].addr
		+ rdev->data_offset;
	bio->bi_bdev = rdev->bdev;
	bio->bi_rw = READ | do_sync;
	bio->bi_private = r10_bio;
	bio->bi_end_io = raid10_end_read_request;
	if (max_sectors < r10_bio->sectors) {
		/* Drat - have to split this up more */
		struct bio *mbio = r10_bio->master_bio;
		int sectors_handled =
			r10_bio->sector + max_sectors
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (mbio->bi_phys_segments == 0)
			mbio->bi_phys_segments = 2;
			mbio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
		generic_make_request(bio);

		r10_bio = mempool_alloc(conf->r10bio_pool,
		r10_bio->master_bio = mbio;
		r10_bio->sectors = (mbio->bi_size >> 9)
		set_bit(R10BIO_ReadError,
		r10_bio->mddev = mddev;
		r10_bio->sector = mbio->bi_sector
		generic_make_request(bio);

static void raid10d(mddev_t *mddev)
	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_IsSync, &r10_bio->state))
			sync_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
			recovery_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
			handle_read_error(mddev, r10_bio);
			/* just a partial read to be scheduled from a separate context */
			int slot = r10_bio->read_slot;
			generic_make_request(r10_bio->devs[slot].bio);

		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	blk_finish_plug(&plug);

static int init_resync(conf_t *conf)
	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r10buf_pool);
	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
	if (!conf->r10buf_pool)
	conf->next_resync = 0;

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 *
 * Resync and recovery are handled very differently.
 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
 *
 * For resync, we iterate over virtual addresses, read all copies,
 * and update if there are differences. If only one copy is live,
 * we update to that copy.
 * For recovery, we iterate over physical addresses, read a good
 * value for each non-in_sync drive, and over-write.
 *
 * So, for recovery we may have several outstanding complex requests for a
 * given address, one for each out-of-sync device. We model this by allocating
 * a number of r10_bio structures, one for each out-of-sync device.
 * As we set up these structures, we collect all bio's together into a list
 * which we then process collectively to add pages, and then process again
 * to pass to generic_make_request.
 *
 * The r10_bio structures are linked using a borrowed master_bio pointer.
 * This link is counted in ->remaining. When the r10_bio that points to NULL
 * has its remaining count decremented to 0, the whole complex operation
 * is complete.
 */
1920 static sector_t
sync_request(mddev_t
*mddev
, sector_t sector_nr
,
1921 int *skipped
, int go_faster
)
1923 conf_t
*conf
= mddev
->private;
1925 struct bio
*biolist
= NULL
, *bio
;
1926 sector_t max_sector
, nr_sectors
;
1929 sector_t sync_blocks
;
1931 sector_t sectors_skipped
= 0;
1932 int chunks_skipped
= 0;
1934 if (!conf
->r10buf_pool
)
1935 if (init_resync(conf
))
1939 max_sector
= mddev
->dev_sectors
;
1940 if (test_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
))
1941 max_sector
= mddev
->resync_max_sectors
;
1942 if (sector_nr
>= max_sector
) {
1943 /* If we aborted, we need to abort the
1944 * sync on the 'current' bitmap chucks (there can
1945 * be several when recovering multiple devices).
1946 * as we may have started syncing it but not finished.
1947 * We can find the current address in
1948 * mddev->curr_resync, but for recovery,
1949 * we need to convert that to several
1950 * virtual addresses.
1952 if (mddev
->curr_resync
< max_sector
) { /* aborted */
1953 if (test_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
))
1954 bitmap_end_sync(mddev
->bitmap
, mddev
->curr_resync
,
1956 else for (i
=0; i
<conf
->raid_disks
; i
++) {
1958 raid10_find_virt(conf
, mddev
->curr_resync
, i
);
1959 bitmap_end_sync(mddev
->bitmap
, sect
,
1962 } else /* completed sync */
1965 bitmap_close_sync(mddev
->bitmap
);
1968 return sectors_skipped
;
1970 if (chunks_skipped
>= conf
->raid_disks
) {
1971 /* if there has been nothing to do on any drive,
1972 * then there is nothing to do at all..
1975 return (max_sector
- sector_nr
) + sectors_skipped
;
1978 if (max_sector
> mddev
->resync_max
)
1979 max_sector
= mddev
->resync_max
; /* Don't do IO beyond here */
1981 /* make sure whole request will fit in a chunk - if chunks
1984 if (conf
->near_copies
< conf
->raid_disks
&&
1985 max_sector
> (sector_nr
| conf
->chunk_mask
))
1986 max_sector
= (sector_nr
| conf
->chunk_mask
) + 1;
1988 * If there is non-resync activity waiting for us then
1989 * put in a delay to throttle resync.
1991 if (!go_faster
&& conf
->nr_waiting
)
1992 msleep_interruptible(1000);
1994 /* Again, very different code for resync and recovery.
1995 * Both must result in an r10bio with a list of bios that
1996 * have bi_end_io, bi_sector, bi_bdev set,
1997 * and bi_private set to the r10bio.
1998 * For recovery, we may actually create several r10bios
1999 * with 2 bios in each, that correspond to the bios in the main one.
2000 * In this case, the subordinate r10bios link back through a
2001 * borrowed master_bio pointer, and the counter in the master
2002 * includes a ref from each subordinate.
2004 /* First, we decide what to do and set ->bi_end_io
2005 * To end_sync_read if we want to read, and
2006 * end_sync_write if we will want to write.
2009 max_sync
= RESYNC_PAGES
<< (PAGE_SHIFT
-9);
2010 if (!test_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
)) {
2011 /* recovery... the complicated one */
2015 for (i
=0 ; i
<conf
->raid_disks
; i
++) {
2021 if (conf
->mirrors
[i
].rdev
== NULL
||
2022 test_bit(In_sync
, &conf
->mirrors
[i
].rdev
->flags
))
2026 /* want to reconstruct this device */
2028 sect
= raid10_find_virt(conf
, sector_nr
, i
);
2029 /* Unless we are doing a full sync, we only need
2030 * to recover the block if it is set in the bitmap
2032 must_sync
= bitmap_start_sync(mddev
->bitmap
, sect
,
2034 if (sync_blocks
< max_sync
)
2035 max_sync
= sync_blocks
;
2038 /* yep, skip the sync_blocks here, but don't assume
2039 * that there will never be anything to do here
2041 chunks_skipped
= -1;
2045 r10_bio
= mempool_alloc(conf
->r10buf_pool
, GFP_NOIO
);
2046 raise_barrier(conf
, rb2
!= NULL
);
2047 atomic_set(&r10_bio
->remaining
, 0);
2049 r10_bio
->master_bio
= (struct bio
*)rb2
;
2051 atomic_inc(&rb2
->remaining
);
2052 r10_bio
->mddev
= mddev
;
2053 set_bit(R10BIO_IsRecover
, &r10_bio
->state
);
2054 r10_bio
->sector
= sect
;
2056 raid10_find_phys(conf
, r10_bio
);
2058 /* Need to check if the array will still be
2061 for (j
=0; j
<conf
->raid_disks
; j
++)
2062 if (conf
->mirrors
[j
].rdev
== NULL
||
2063 test_bit(Faulty
, &conf
->mirrors
[j
].rdev
->flags
)) {
2068 must_sync
= bitmap_start_sync(mddev
->bitmap
, sect
,
2069 &sync_blocks
, still_degraded
);
2071 for (j
=0; j
<conf
->copies
;j
++) {
2072 int d
= r10_bio
->devs
[j
].devnum
;
2073 if (!conf
->mirrors
[d
].rdev
||
2074 !test_bit(In_sync
, &conf
->mirrors
[d
].rdev
->flags
))
2076 /* This is where we read from */
2077 bio
= r10_bio
->devs
[0].bio
;
2078 bio
->bi_next
= biolist
;
2080 bio
->bi_private
= r10_bio
;
2081 bio
->bi_end_io
= end_sync_read
;
2083 bio
->bi_sector
= r10_bio
->devs
[j
].addr
+
2084 conf
->mirrors
[d
].rdev
->data_offset
;
2085 bio
->bi_bdev
= conf
->mirrors
[d
].rdev
->bdev
;
2086 atomic_inc(&conf
->mirrors
[d
].rdev
->nr_pending
);
2087 atomic_inc(&r10_bio
->remaining
);
2088 /* and we write to 'i' */
2090 for (k
=0; k
<conf
->copies
; k
++)
2091 if (r10_bio
->devs
[k
].devnum
== i
)
2093 BUG_ON(k
== conf
->copies
);
2094 bio
= r10_bio
->devs
[1].bio
;
2095 bio
->bi_next
= biolist
;
2097 bio
->bi_private
= r10_bio
;
2098 bio
->bi_end_io
= end_sync_write
;
2100 bio
->bi_sector
= r10_bio
->devs
[k
].addr
+
2101 conf
->mirrors
[i
].rdev
->data_offset
;
2102 bio
->bi_bdev
= conf
->mirrors
[i
].rdev
->bdev
;
2104 r10_bio
->devs
[0].devnum
= d
;
2105 r10_bio
->devs
[1].devnum
= i
;
2109 if (j
== conf
->copies
) {
2110 /* Cannot recover, so abort the recovery */
2113 atomic_dec(&rb2
->remaining
);
2115 if (!test_and_set_bit(MD_RECOVERY_INTR
,
2117 printk(KERN_INFO
"md/raid10:%s: insufficient "
2118 "working devices for recovery.\n",
2123 if (biolist
== NULL
) {
2125 r10bio_t
*rb2
= r10_bio
;
2126 r10_bio
= (r10bio_t
*) rb2
->master_bio
;
2127 rb2
->master_bio
= NULL
;
2133 /* resync. Schedule a read for every block at this virt offset */
2136 bitmap_cond_end_sync(mddev
->bitmap
, sector_nr
);
2138 if (!bitmap_start_sync(mddev
->bitmap
, sector_nr
,
2139 &sync_blocks
, mddev
->degraded
) &&
2140 !conf
->fullsync
&& !test_bit(MD_RECOVERY_REQUESTED
,
2141 &mddev
->recovery
)) {
2142 /* We can skip this block */
2144 return sync_blocks
+ sectors_skipped
;
2146 if (sync_blocks
< max_sync
)
2147 max_sync
= sync_blocks
;
2148 r10_bio
= mempool_alloc(conf
->r10buf_pool
, GFP_NOIO
);
2150 r10_bio
->mddev
= mddev
;
2151 atomic_set(&r10_bio
->remaining
, 0);
2152 raise_barrier(conf
, 0);
2153 conf
->next_resync
= sector_nr
;
2155 r10_bio
->master_bio
= NULL
;
2156 r10_bio
->sector
= sector_nr
;
2157 set_bit(R10BIO_IsSync
, &r10_bio
->state
);
2158 raid10_find_phys(conf
, r10_bio
);
2159 r10_bio
->sectors
= (sector_nr
| conf
->chunk_mask
) - sector_nr
+1;
		for (i = 0; i < conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			bio = r10_bio->devs[i].bio;
			bio->bi_end_io = NULL;
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			if (conf->mirrors[d].rdev == NULL ||
			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
				continue;
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			atomic_inc(&r10_bio->remaining);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_read;
			bio->bi_rw = READ;
			bio->bi_sector = r10_bio->devs[i].addr +
				conf->mirrors[d].rdev->data_offset;
			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
			count++;
		}

		if (count < 2) {
			for (i = 0; i < conf->copies; i++) {
				int d = r10_bio->devs[i].devnum;
				if (r10_bio->devs[i].bio->bi_end_io)
					rdev_dec_pending(conf->mirrors[d].rdev,
							 mddev);
			}
			put_buf(r10_bio);
			biolist = NULL;
			goto giveup;
		}
	}

	for (bio = biolist; bio ; bio = bio->bi_next) {

		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		if (bio->bi_end_io)
			bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_size = 0;
	}

	nr_sectors = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		for (bio = biolist; bio ; bio = bio->bi_next) {
			struct bio *bio2;
			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
			if (bio_add_page(bio, page, len, 0))
				continue;

			/* stop here */
			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
			for (bio2 = biolist;
			     bio2 && bio2 != bio;
			     bio2 = bio2->bi_next) {
				/* remove last page from this bio */
				bio2->bi_vcnt--;
				bio2->bi_size -= len;
				bio2->bi_flags &= ~(1 << BIO_SEG_VALID);
			}
			goto bio_full;
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (biolist->bi_vcnt < RESYNC_PAGES);
 bio_full:
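	/*
	 * Every bio on 'biolist' now covers the same nr_sectors: if
	 * bio_add_page() failed part way through a pass, the loop above
	 * stripped the partially added page from the bios that already had
	 * it, so the request stays the same length on every device.
	 */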
	r10_bio->sectors = nr_sectors;
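	/*
	 * Only the end_sync_read bios are submitted below; the matching
	 * writes are issued later, from raid10d, once the reads have
	 * completed and the data has been checked.
	 */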
	while (biolist) {
		bio = biolist;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = bio->bi_private;
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
			generic_make_request(bio);
		}
	}

	if (sectors_skipped)
		/* pretend they weren't skipped, it makes
		 * no important difference in this case
		 */
		md_done_sync(mddev, sectors_skipped, 1);

	return sectors_skipped + nr_sectors;
 giveup:
	/* There is nowhere to write, so all non-sync
	 * drives must be failed, so try the next chunk...
	 */
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;

	sectors_skipped += (max_sector - sector_nr);
	chunks_skipped++;
	sector_nr = max_sector;
	goto skipped;
}
static sector_t
raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t size;
	conf_t *conf = mddev->private;

	if (!raid_disks)
		raid_disks = conf->raid_disks;
	if (!sectors)
		sectors = conf->dev_sectors;

	size = sectors >> conf->chunk_shift;
	sector_div(size, conf->far_copies);
	size = size * raid_disks;
	sector_div(size, conf->near_copies);

	return size << conf->chunk_shift;
}
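/*
 * Example: 4 devices of 1000 chunks each with near_copies = 2 and
 * far_copies = 1 give 1000 / 1 * 4 / 2 = 2000 chunks of array capacity,
 * i.e. half of the raw space, as expected with two copies of each block.
 */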
static conf_t *setup_conf(mddev_t *mddev)
{
	conf_t *conf = NULL;
	int nc, fc, fo;
	sector_t stride, size;
	int err = -EINVAL;

	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid10:%s: chunk size must be "
		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
		       mdname(mddev), PAGE_SIZE);
		goto out;
	}

	nc = mddev->new_layout & 255;
	fc = (mddev->new_layout >> 8) & 255;
	fo = mddev->new_layout & (1<<16);

	if ((nc*fc) < 2 || (nc*fc) > mddev->raid_disks ||
	    (mddev->new_layout >> 17)) {
		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
		       mdname(mddev), mddev->new_layout);
		goto out;
	}
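	/*
	 * Example layout words: 0x102 is near_copies = 2, far_copies = 1,
	 * far_offset = 0 (the common "n2" layout); 0x201 is near_copies = 1
	 * with far_copies = 2; setting bit 16 (0x10000) selects the
	 * far_offset variant of the far layout.
	 */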
	err = -ENOMEM;
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	if (!conf)
		goto out;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out;

	conf->raid_disks = mddev->raid_disks;
	conf->near_copies = nc;
	conf->far_copies = fc;
	conf->copies = nc*fc;
	conf->far_offset = fo;
	conf->chunk_mask = mddev->new_chunk_sectors - 1;
	conf->chunk_shift = ffz(~mddev->new_chunk_sectors);

	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
					   r10bio_pool_free, conf);
	if (!conf->r10bio_pool)
		goto out;

	size = mddev->dev_sectors >> conf->chunk_shift;
	sector_div(size, fc);
	size = size * conf->raid_disks;
	sector_div(size, nc);
	/* 'size' is now the number of chunks in the array */
	/* calculate "used chunks per device" in 'stride' */
	stride = size * conf->copies;

	/* We need to round up when dividing by raid_disks to
	 * get the stride size.
	 */
	stride += conf->raid_disks - 1;
	sector_div(stride, conf->raid_disks);

	conf->dev_sectors = stride << conf->chunk_shift;

	if (fo)
		stride = 1;
	else
		sector_div(stride, fc);
	conf->stride = stride << conf->chunk_shift;
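	/*
	 * Example: 3 devices of 101 chunks each, near_copies = 2,
	 * far_copies = 1: size = 101 * 3 / 2 = 151 chunks in the array
	 * (rounded down), and stride = (151 * 2 + 2) / 3 = 101 chunks per
	 * device, rounded up so that stride * raid_disks is enough to hold
	 * all size * copies chunks.
	 */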
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	conf->thread = md_register_thread(raid10d, mddev, NULL);
	if (!conf->thread)
		goto out;

	conf->mddev = mddev;
	return conf;

 out:
	printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
	       mdname(mddev));
	if (conf) {
		if (conf->r10bio_pool)
			mempool_destroy(conf->r10bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf);
	}
	return ERR_PTR(err);
}
static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, disk_idx, chunk_size;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	sector_t size;

	/*
	 * copy the already verified devices into our private RAID10
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */

	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
		if (IS_ERR(conf))
			return PTR_ERR(conf);
		mddev->private = conf;
	}
	conf = mddev->private;
	if (!conf)
		goto out;

	mddev->thread = conf->thread;
	conf->thread = NULL;

	chunk_size = mddev->chunk_sectors << 9;
	blk_queue_io_min(mddev->queue, chunk_size);
	if (conf->raid_disks % conf->near_copies)
		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
	else
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks / conf->near_copies));
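	/*
	 * The optimal I/O size is one full stripe: raid_disks/near_copies
	 * chunks when the near copies divide the disks evenly, otherwise
	 * (e.g. 3 disks with near_copies = 2) a full rotation of
	 * raid_disks chunks.
	 */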
	list_for_each_entry(rdev, &mddev->disks, same_set) {

		if (rdev->badblocks.count) {
			printk(KERN_ERR "md/raid10: cannot handle bad blocks yet\n");
			continue;
		}
		disk_idx = rdev->raid_disk;
		if (disk_idx >= conf->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit max_segments to 1 lying
		 * within a single page.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		disk->head_position = 0;
	}
	/* need to check that every block has at least one working mirror */
	if (!enough(conf, -1)) {
		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
			if (disk->rdev)
				conf->fullsync = 1;
		}
	}

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid10:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
		"md/raid10:%s: active with %d out of %d devices\n",
		mdname(mddev), conf->raid_disks - mddev->degraded,
		conf->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->dev_sectors = conf->dev_sectors;
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;

	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/* Calculate max read-ahead size.
	 * We need to readahead at least twice a whole stripe....
	 */
	{
		int stripe = conf->raid_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		stripe /= conf->near_copies;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
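	/*
	 * Example: 4 disks, 512KiB chunks, near_copies = 2: stripe =
	 * 4 * (524288 / 4096) / 2 = 256 pages, so read-ahead is raised to
	 * at least 512 pages (2MiB), i.e. two whole stripes.
	 */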
	if (conf->near_copies < conf->raid_disks)
		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	md_unregister_thread(mddev->thread);
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev->private;

	raise_barrier(conf, 0);
	lower_barrier(conf);

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
static void raid10_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev->private;

	switch (state) {
	case 1:
		raise_barrier(conf, 0);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}
static void *raid10_takeover_raid0(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	conf_t *conf;

	if (mddev->degraded > 0) {
		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 10;
	/* new layout: far_copies = 1, near_copies = 2 */
	mddev->new_layout = (1<<8) + 2;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;
	mddev->raid_disks *= 2;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	conf = setup_conf(mddev);
	if (!IS_ERR(conf)) {
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0)
				rdev->new_raid_disk = rdev->raid_disk * 2;
		conf->barrier = 1;
	}

	return conf;
}
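/*
 * Note on the takeover above: (1<<8) + 2 encodes far_copies = 1 and
 * near_copies = 2, so an N-drive raid0 becomes a 2N-drive "n2" raid10.
 * The original members keep their data in the even slots (raid_disk * 2);
 * the odd slots start out missing, so the array comes up degraded until
 * mirror devices are added.
 */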
static void *raid10_takeover(mddev_t *mddev)
{
	struct raid0_private_data *raid0_priv;

	/* raid10 can take over:
	 *  raid0 - providing it has only two drives
	 */
	if (mddev->level == 0) {
		/* for raid0 takeover only one zone is supported */
		raid0_priv = mddev->private;
		if (raid0_priv->nr_strip_zones > 1) {
			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
			       " with more than one zone.\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		return raid10_takeover_raid0(mddev);
	}
	return ERR_PTR(-EINVAL);
}
static struct mdk_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk = raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.takeover	= raid10_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");