/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) copies of each chunk, and each
 * is on a different drive.
 * near_copies and far_copies must be at least one, and their product is at
 * most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 */
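/*
 * Illustrative example (not part of the original source): with
 * raid_disks=4, near_copies=2, far_copies=1 ('n2'), chunks A,B,C,D,...
 * are laid out as
 *
 *	dev0  dev1  dev2  dev3
 *	 A     A     B     B
 *	 C     C     D     D
 *
 * With raid_disks=4, near_copies=1, far_copies=2 ('f2'), each device is
 * split into two sections, and the second section repeats the data with
 * the starting device shifted by near_copies (here 1):
 *
 *	dev0  dev1  dev2  dev3
 *	 A     B     C     D	<- first section
 *	 E     F     G     H
 *	 D     A     B     C	<- second section (far copies)
 *	 H     E     F     G
 */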
/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	conf_t *conf = data;
	int size = offsetof(struct r10bio_s, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}
static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
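/*
 * Worked numbers (illustrative, assuming 4KiB pages): RESYNC_BLOCK_SIZE
 * is 64KiB, so RESYNC_PAGES is 16; RESYNC_WINDOW reserves 1MiB, i.e. 16
 * such buffers; RESYNC_DEPTH works out to 32MiB/64KiB = 512 requests.
 */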
/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf).
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	conf_t *conf = data;
	struct page *page;
	r10bio_t *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * to the bios.
	 */
	for (j = 0 ; j < nalloc; j++) {
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < nalloc )
		bio_put(r10_bio->devs[j].bio);
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}
static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	conf_t *conf = data;
	r10bio_t *r10bio = __r10_bio;
	int j;

	for (j = 0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
	}
	r10bio_pool_free(r10bio, conf);
}
static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = & r10_bio->devs[i].bio;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}
static void free_r10bio(r10bio_t *r10_bio)
{
	conf_t *conf = r10_bio->mddev->private;

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}
static void put_buf(r10bio_t *r10_bio)
{
	conf_t *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}
static void reschedule_retry(r10bio_t *r10_bio)
{
	unsigned long flags;
	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layers.
 */
static void raid_end_bio_io(r10bio_t *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;

	bio_endio(bio,
		  test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
	free_r10bio(r10_bio);
}
/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, r10bio_t *r10_bio)
{
	conf_t *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}
static void raid10_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;
	int slot, dev;
	conf_t *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
		raid_end_bio_io(r10_bio);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
			       mdname(conf->mddev),
			       bdevname(conf->mirrors[dev].rdev->bdev, b), (unsigned long long)r10_bio->sector);
		reschedule_retry(r10_bio);
	}

	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
static void raid10_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;
	int slot, dev;
	conf_t *conf = r10_bio->mddev->private;

	for (slot = 0; slot < conf->copies; slot++)
		if (r10_bio->devs[slot].bio == bio)
			break;
	dev = r10_bio->devs[slot].devnum;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
		/* an I/O failed, we can't clear the bitmap */
		set_bit(R10BIO_Degraded, &r10_bio->state);
	} else
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);

	update_head_pos(slot, r10_bio);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
				r10_bio->sectors,
				!test_bit(R10BIO_Degraded, &r10_bio->state),
				0);
		md_write_end(r10_bio->mddev);
		raid_end_bio_io(r10_bio);
	}

	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
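/*
 * Worked example (illustrative, not in the original): with raid_disks=4,
 * near_copies=2, far_copies=1 and 64KiB chunks (chunk_shift=7,
 * chunk_mask=127), virtual sector 300 is chunk 2 at offset 44.
 * chunk*near_copies = 4, so dev = 4 % 4 = 0 and stripe = 1, giving a
 * device address of 1*128 + 44 = 172.  The two copies therefore land on
 * devices 0 and 1, both at device sector 172; raid10_find_virt maps
 * (dev 0, sector 172) back to virtual sector 300.
 */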
static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
{
	int n, f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> conf->chunk_shift;
	sector = r10bio->sector & conf->chunk_mask;

	chunk *= conf->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, conf->raid_disks);
	if (conf->far_offset)
		stripe *= conf->far_copies;

	sector += stripe << conf->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < conf->near_copies; n++) {
		int d = dev;
		sector_t s = sector;
		r10bio->devs[slot].addr = sector;
		r10bio->devs[slot].devnum = d;
		slot++;

		for (f = 1; f < conf->far_copies; f++) {
			d += conf->near_copies;
			if (d >= conf->raid_disks)
				d -= conf->raid_disks;
			s += conf->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= conf->raid_disks) {
			dev = 0;
			sector += (conf->chunk_mask + 1);
		}
	}
	BUG_ON(slot != conf->copies);
}
static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;

	offset = sector & conf->chunk_mask;
	if (conf->far_offset) {
		int fc;
		chunk = sector >> conf->chunk_shift;
		fc = sector_div(chunk, conf->far_copies);
		dev -= fc * conf->near_copies;
		if (dev < 0)
			dev += conf->raid_disks;
	} else {
		while (sector >= conf->stride) {
			sector -= conf->stride;
			if (dev < conf->near_copies)
				dev += conf->raid_disks - conf->near_copies;
			else
				dev -= conf->near_copies;
		}
		chunk = sector >> conf->chunk_shift;
	}
	vchunk = chunk * conf->raid_disks + dev;
	sector_div(vchunk, conf->near_copies);
	return (vchunk << conf->chunk_shift) + offset;
}
/**
 *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 *	If near_copies == raid_disk, there are no striping issues,
 *	but in that case, the function isn't called at all.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
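/*
 * Example (illustrative): with chunk_sectors = 128 and a new bio starting
 * 100 sectors into a chunk (bio_sectors still 0), max works out to
 * (128 - 100) << 9 = 14336 bytes before the chunk boundary is hit.
 */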
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	const int sectors = r10_bio->sectors;
	sector_t new_distance, best_dist;
	mdk_rdev_t *rdev;
	int do_balance;
	int best_slot;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
retry:
	best_slot = -1;
	best_dist = MaxSector;
	do_balance = 1;
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL)
			continue;
		if (!test_bit(In_sync, &rdev->flags))
			continue;

		if (!do_balance)
			break;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
			break;

		/* for far > 1 always use the lowest address */
		if (conf->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_slot = slot;
		}
	}
	if (slot == conf->copies)
		slot = best_slot;

	if (slot >= 0) {
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		r10_bio->read_slot = slot;
	} else
		disk = -1;
	rcu_read_unlock();

	return disk;
}
static int raid10_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev->private;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
static void flush_pending_writes(conf_t *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}
/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
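/*
 * Sketch of the resulting usage pattern (illustrative only):
 *
 *	regular IO:			resync/recovery IO:
 *	  wait_barrier(conf);		  raise_barrier(conf, 0);
 *	  ...submit the IO...		  ...do the background IO...
 *	  allow_barrier(conf);		  lower_barrier(conf);
 */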
static void raise_barrier(conf_t *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock, );

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock, );

	spin_unlock_irq(&conf->resync_lock);
}
static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    );
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}
static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}
static int make_request(mddev_t *mddev, struct bio * bio)
{
	conf_t *conf = mddev->private;
	mirror_info_t *mirror;
	r10bio_t *r10_bio;
	struct bio *read_bio;
	int i;
	int chunk_sects = conf->chunk_mask + 1;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
	unsigned long flags;
	mdk_rdev_t *blocked_rdev;
	int plugged;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return 0;
	}

	/* If this request crosses a chunk boundary, we need to
	 * split it. This will only happen for 1 PAGE (or less) requests.
	 */
	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
		      > chunk_sects &&
		    conf->near_copies < conf->raid_disks)) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio,
			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );

		/* Each of these 'make_request' calls will call 'wait_barrier'.
		 * If the first succeeds but the second blocks due to the resync
		 * thread raising the barrier, we will deadlock because the
		 * IO to the underlying device will be queued in generic_make_request
		 * and will never complete, so will never reduce nr_pending.
		 * So increment nr_waiting here so no new raise_barriers will
		 * succeed, and so the second wait_barrier cannot block.
		 */
		spin_lock_irq(&conf->resync_lock);
		conf->nr_waiting++;
		spin_unlock_irq(&conf->resync_lock);

		if (make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);

		spin_lock_irq(&conf->resync_lock);
		conf->nr_waiting--;
		wake_up(&conf->wait_barrier);
		spin_unlock_irq(&conf->resync_lock);

		bio_pair_release(bp);
		return 0;
	bad_map:
		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
		bio_io_error(bio);
		return 0;
	}

	md_write_start(mddev, bio);

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf);

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = bio->bi_size >> 9;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_sector;
	r10_bio->state = 0;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int disk = read_balance(conf, r10_bio);
		int slot = r10_bio->read_slot;
		if (disk < 0) {
			raid_end_bio_io(r10_bio);
			return 0;
		}
		mirror = conf->mirrors + disk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);

		r10_bio->devs[slot].bio = read_bio;

		read_bio->bi_sector = r10_bio->devs[slot].addr +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r10_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	plugged = mddev_check_plugged(mddev);

	raid10_find_phys(conf, r10_bio);
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0;  i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			r10_bio->devs[i].bio = bio;
		} else {
			r10_bio->devs[i].bio = NULL;
			set_bit(R10BIO_Degraded, &r10_bio->state);
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		int j;
		int d;

		for (j = 0; j < i; j++)
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	atomic_set(&r10_bio->remaining, 1);
	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {
		struct bio *mbio;
		int d = r10_bio->devs[i].devnum;
		if (!r10_bio->devs[i].bio)
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		r10_bio->devs[i].bio = mbio;

		mbio->bi_sector	= r10_bio->devs[i].addr+
			conf->mirrors[d].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		mbio->bi_end_io	= raid10_end_write_request;
		mbio->bi_rw = WRITE | do_sync | do_fua;
		mbio->bi_private = r10_bio;

		atomic_inc(&r10_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (atomic_dec_and_test(&r10_bio->remaining)) {
		/* This matches the end of raid10_end_write_request() */
		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
				r10_bio->sectors,
				!test_bit(R10BIO_Degraded, &r10_bio->state),
				0);
		md_write_end(mddev);
		raid_end_bio_io(r10_bio);
	}

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync || !mddev->bitmap || !plugged)
		md_wakeup_thread(mddev->thread);
	return 0;
}
static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev->private;
	int i;

	if (conf->near_copies < conf->raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->near_copies);
	if (conf->far_copies > 1) {
		if (conf->far_offset)
			seq_printf(seq, " %d offset-copies", conf->far_copies);
		else
			seq_printf(seq, " %d far-copies", conf->far_copies);
	}
	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
					conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			      conf->mirrors[i].rdev &&
			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && conf->raid_disks-mddev->degraded == 1)
		/*
		 * Don't fail the drive, just return an IO error.
		 * The test should really be more sophisticated than
		 * "working_disks == 1", but it isn't critical, and
		 * can wait until we do more sophisticated "is the drive
		 * really dead" tests...
		 */
		return;
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
	       "md/raid10:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
{
	int i;
	mirror_info_t *tmp;

	printk(KERN_DEBUG "RAID10 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
				i, !test_bit(In_sync, &tmp->rdev->flags),
				!test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev,b));
	}
}
static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r10buf_pool);
	conf->r10buf_pool = NULL;
}
/* check if there are enough drives for
 * every block to appear on at least one
 */
static int enough(conf_t *conf)
{
	int first = 0;

	do {
		int n = conf->copies;
		int cnt = 0;
		while (n--) {
			if (conf->mirrors[first].rdev)
				cnt++;
			first = (first+1) % conf->raid_disks;
		}
		if (cnt == 0)
			return 0;
	} while (first != 0);
	return 1;
}
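/* Example (illustrative): with raid_disks=4 and copies=2, enough()
 * checks the device windows {0,1} and {2,3}.  Losing devices 0 and 2
 * still leaves one drive in each window, so the array is viable;
 * losing devices 0 and 1 empties the first window and enough()
 * returns 0.
 */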
static int raid10_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;
	mirror_info_t *tmp;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all non-in_sync disks within the RAID10 configuration
	 * and mark them in_sync
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->mirrors + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}
static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int err = -EEXIST;
	int mirror;
	mirror_info_t *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_cp < MaxSector)
		/* only hot-add to in-sync arrays, as recovery is
		 * very different from resync
		 */
		return -EBUSY;
	if (!enough(conf))
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		mirror = rdev->saved_raid_disk;
	else
		mirror = first;
	for ( ; mirror <= last ; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			/* as we don't honour merge_bvec_fn, we must
			 * never risk violating it, so limit
			 * ->max_segments to one lying with a single
			 * page, as a one page request is never in
			 * violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);
			}

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			if (rdev->saved_raid_disk != mirror)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	md_integrity_add_rdev(rdev, mddev);
	print_conf(conf);
	return err;
}
*mddev
, int number
)
1136 conf_t
*conf
= mddev
->private;
1139 mirror_info_t
*p
= conf
->mirrors
+ number
;
1144 if (test_bit(In_sync
, &rdev
->flags
) ||
1145 atomic_read(&rdev
->nr_pending
)) {
1149 /* Only remove faulty devices in recovery
1152 if (!test_bit(Faulty
, &rdev
->flags
) &&
1159 if (atomic_read(&rdev
->nr_pending
)) {
1160 /* lost the race, try later */
1165 err
= md_integrity_register(mddev
);
1174 static void end_sync_read(struct bio
*bio
, int error
)
1176 r10bio_t
*r10_bio
= bio
->bi_private
;
1177 conf_t
*conf
= r10_bio
->mddev
->private;
1180 for (i
=0; i
<conf
->copies
; i
++)
1181 if (r10_bio
->devs
[i
].bio
== bio
)
1183 BUG_ON(i
== conf
->copies
);
1184 update_head_pos(i
, r10_bio
);
1185 d
= r10_bio
->devs
[i
].devnum
;
1187 if (test_bit(BIO_UPTODATE
, &bio
->bi_flags
))
1188 set_bit(R10BIO_Uptodate
, &r10_bio
->state
);
1190 atomic_add(r10_bio
->sectors
,
1191 &conf
->mirrors
[d
].rdev
->corrected_errors
);
1192 if (!test_bit(MD_RECOVERY_SYNC
, &conf
->mddev
->recovery
))
1193 md_error(r10_bio
->mddev
,
1194 conf
->mirrors
[d
].rdev
);
1197 /* for reconstruct, we always reschedule after a read.
1198 * for resync, only after all reads
1200 rdev_dec_pending(conf
->mirrors
[d
].rdev
, conf
->mddev
);
1201 if (test_bit(R10BIO_IsRecover
, &r10_bio
->state
) ||
1202 atomic_dec_and_test(&r10_bio
->remaining
)) {
1203 /* we have read all the blocks,
1204 * do the comparison in process context in raid10d
1206 reschedule_retry(r10_bio
);
static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;
	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;
	int i,d;

	for (i = 0; i < conf->copies; i++)
		if (r10_bio->devs[i].bio == bio)
			break;
	d = r10_bio->devs[i].devnum;

	if (!uptodate)
		md_error(mddev, conf->mirrors[d].rdev);

	update_head_pos(i, r10_bio);

	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
	while (atomic_dec_and_test(&r10_bio->remaining)) {
		if (r10_bio->master_bio == NULL) {
			/* the primary of several recovery bios */
			sector_t s = r10_bio->sectors;
			put_buf(r10_bio);
			md_done_sync(mddev, s, 1);
			break;
		} else {
			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
			put_buf(r10_bio);
			r10_bio = r10_bio2;
		}
	}
}
/*
 * Note: sync and recover are handled very differently for raid10
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write.  The lowest numbered
 * drive is authoritative.
 * However requests come for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
 *
 * We check if all blocks are in-sync and only write to blocks that
 * are not in sync.
 */
static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
	conf_t *conf = mddev->private;
	int i, first;
	struct bio *tbio, *fbio;

	atomic_set(&r10_bio->remaining, 1);

	/* find the first device with a block */
	for (i=0; i<conf->copies; i++)
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
			break;

	if (i == conf->copies)
		goto done;

	first = i;
	fbio = r10_bio->devs[i].bio;

	/* now find blocks with errors */
	for (i=0 ; i < conf->copies ; i++) {
		int j, d;
		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);

		tbio = r10_bio->devs[i].bio;

		if (tbio->bi_end_io != end_sync_read)
			continue;
		if (i == first)
			continue;
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE;
			 */
			for (j = 0; j < vcnt; j++)
				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
					   page_address(tbio->bi_io_vec[j].bv_page),
					   PAGE_SIZE))
					break;
			if (j == vcnt)
				continue;
			mddev->resync_mismatches += r10_bio->sectors;
		}
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			/* Don't fix anything. */
			continue;
		/* Ok, we need to write this bio
		 * First we need to fixup bv_offset, bv_len and
		 * bi_vecs, as the read request might have corrupted these
		 */
		tbio->bi_vcnt = vcnt;
		tbio->bi_size = r10_bio->sectors << 9;
		tbio->bi_idx = 0;
		tbio->bi_phys_segments = 0;
		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		tbio->bi_flags |= 1 << BIO_UPTODATE;
		tbio->bi_next = NULL;
		tbio->bi_rw = WRITE;
		tbio->bi_private = r10_bio;
		tbio->bi_sector = r10_bio->devs[i].addr;

		for (j=0; j < vcnt ; j++) {
			tbio->bi_io_vec[j].bv_offset = 0;
			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;

			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
			       page_address(fbio->bi_io_vec[j].bv_page),
			       PAGE_SIZE);
		}
		tbio->bi_end_io = end_sync_write;

		d = r10_bio->devs[i].devnum;
		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
		atomic_inc(&r10_bio->remaining);
		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);

		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		generic_make_request(tbio);
	}

done:
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		md_done_sync(mddev, r10_bio->sectors, 1);
		put_buf(r10_bio);
	}
}
/*
 * Now for the recovery code.
 * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choose a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use. The first for reading,
 * The second for writing.
 *
 */

static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
	conf_t *conf = mddev->private;
	int i, d;
	struct bio *bio, *wbio;

	/* move the pages across to the second bio
	 * and submit the write request
	 */
	bio = r10_bio->devs[0].bio;
	wbio = r10_bio->devs[1].bio;
	for (i=0; i < wbio->bi_vcnt; i++) {
		struct page *p = bio->bi_io_vec[i].bv_page;
		bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
		wbio->bi_io_vec[i].bv_page = p;
	}
	d = r10_bio->devs[1].devnum;

	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
		generic_make_request(wbio);
	else
		bio_endio(wbio, -EIO);
}
/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 *
 */
static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct timespec cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	ktime_get_ts(&cur_time_mon);

	if (rdev->last_read_error.tv_sec == 0 &&
	    rdev->last_read_error.tv_nsec == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (cur_time_mon.tv_sec -
			    rdev->last_read_error.tv_sec) / 3600;

	rdev->last_read_error = cur_time_mon;

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
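/*
 * Example (illustrative): an rdev with read_errors == 64 whose last
 * recorded error was 3 hours ago decays to 64 >> 3 == 8 before the
 * caller counts the new error.
 */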
/*
 * This is a kernel thread which:
 *
 * 1.	Retries failed read operations on working mirrors.
 * 2.	Updates the raid superblock when problems are encountered.
 * 3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
{
	int sect = 0; /* Offset from r10_bio->sector */
	int sectors = r10_bio->sectors;
	mdk_rdev_t *rdev;
	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
	int d = r10_bio->devs[r10_bio->read_slot].devnum;

	rcu_read_lock();
	rdev = rcu_dereference(conf->mirrors[d].rdev);
	if (rdev) { /* If rdev is not NULL */
		char b[BDEVNAME_SIZE];
		int cur_read_error_count = 0;

		bdevname(rdev->bdev, b);

		if (test_bit(Faulty, &rdev->flags)) {
			rcu_read_unlock();
			/* drive has already been failed, just ignore any
			   more fix_read_error() attempts */
			return;
		}

		check_decay_read_errors(mddev, rdev);
		atomic_inc(&rdev->read_errors);
		cur_read_error_count = atomic_read(&rdev->read_errors);
		if (cur_read_error_count > max_read_errors) {
			rcu_read_unlock();
			printk(KERN_NOTICE
			       "md/raid10:%s: %s: Raid device exceeded "
			       "read_error threshold "
			       "[cur %d:max %d]\n",
			       mdname(mddev),
			       b, cur_read_error_count, max_read_errors);
			printk(KERN_NOTICE
			       "md/raid10:%s: %s: Failing raid "
			       "device\n", mdname(mddev), b);
			md_error(mddev, conf->mirrors[d].rdev);
			return;
		}
	}
	rcu_read_unlock();

	while(sectors) {
		int s = sectors;
		int sl = r10_bio->read_slot;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		rcu_read_lock();
		do {
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				success = sync_page_io(rdev,
						       r10_bio->devs[sl].addr +
						       sect,
						       s<<9,
						       conf->tmppage, READ, false);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
				if (success)
					break;
			}
			sl++;
			if (sl == conf->copies)
				sl = 0;
		} while (!success && sl != r10_bio->read_slot);
		rcu_read_unlock();

		if (!success) {
			/* Cannot read from anywhere -- bye bye array */
			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
			md_error(mddev, conf->mirrors[dn].rdev);
			break;
		}

		start = sl;
		/* write it back and re-read */
		rcu_read_lock();
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				atomic_add(s, &rdev->corrected_errors);
				if (sync_page_io(rdev,
						 r10_bio->devs[sl].addr +
						 sect,
						 s<<9, conf->tmppage, WRITE, false)
				    == 0) {
					/* Well, this device is dead */
					printk(KERN_NOTICE
					       "md/raid10:%s: read correction "
					       "write failed"
					       " (%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
					       rdev->data_offset),
					       bdevname(rdev->bdev, b));
					printk(KERN_NOTICE "md/raid10:%s: %s: failing "
					       "drive\n",
					       mdname(mddev),
					       bdevname(rdev->bdev, b));
					md_error(mddev, rdev);
				}
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
		sl = start;
		while (sl != r10_bio->read_slot) {

			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				char b[BDEVNAME_SIZE];
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (sync_page_io(rdev,
						 r10_bio->devs[sl].addr +
						 sect,
						 s<<9, conf->tmppage,
						 READ, false) == 0) {
					/* Well, this device is dead */
					printk(KERN_NOTICE
					       "md/raid10:%s: unable to read back "
					       "corrected sectors"
					       " (%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
						    rdev->data_offset),
					       bdevname(rdev->bdev, b));
					printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
					       mdname(mddev),
					       bdevname(rdev->bdev, b));

					md_error(mddev, rdev);
				} else {
					printk(KERN_INFO
					       "md/raid10:%s: read error corrected"
					       " (%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
						    rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}

				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
		rcu_read_unlock();

		sectors -= s;
		sect += s;
	}
}
static void raid10d(mddev_t *mddev)
{
	r10bio_t *r10_bio;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	mdk_rdev_t *rdev;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {
		char b[BDEVNAME_SIZE];

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_IsSync, &r10_bio->state))
			sync_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
			recovery_request_write(mddev, r10_bio);
		else {
			int mirror;
			/* we got a read error. Maybe the drive is bad.  Maybe just
			 * the block and we can fix it.
			 * We freeze all other IO, and try reading the block from
			 * other devices.  When we find one, we re-write
			 * and check if that fixes the read error.
			 * This is all done synchronously while the array is
			 * frozen.
			 */
			if (mddev->ro == 0) {
				freeze_array(conf);
				fix_read_error(conf, mddev, r10_bio);
				unfreeze_array(conf);
			}

			bio = r10_bio->devs[r10_bio->read_slot].bio;
			r10_bio->devs[r10_bio->read_slot].bio =
				mddev->ro ? IO_BLOCKED : NULL;
			mirror = read_balance(conf, r10_bio);
			if (mirror == -1) {
				printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       mdname(mddev),
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r10_bio->sector);
				raid_end_bio_io(r10_bio);
				bio_put(bio);
			} else {
				const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
				bio_put(bio);
				rdev = conf->mirrors[mirror].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
					       " another mirror\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       (unsigned long long)r10_bio->sector);
				bio = bio_clone_mddev(r10_bio->master_bio,
						      GFP_NOIO, mddev);
				r10_bio->devs[r10_bio->read_slot].bio = bio;
				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
					+ rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_rw = READ | do_sync;
				bio->bi_private = r10_bio;
				bio->bi_end_io = raid10_end_read_request;
				generic_make_request(bio);
			}
		}
		cond_resched();
	}
	blk_finish_plug(&plug);
}
static int init_resync(conf_t *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r10buf_pool);
	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
	if (!conf->r10buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 *
 * Resync and recovery are handled very differently.
 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
 *
 * For resync, we iterate over virtual addresses, read all copies,
 * and update if there are differences.  If only one copy is live,
 * we update the others from that one.
 * For recovery, we iterate over physical addresses, read a good
 * value for each non-in_sync drive, and over-write.
 *
 * So, for recovery we may have several outstanding complex requests for a
 * given address, one for each out-of-sync device.  We model this by allocating
 * a number of r10_bio structures, one for each out-of-sync device.
 * As we setup these structures, we collect all bio's together into a list
 * which we then process collectively to add pages, and then process again
 * to pass to generic_make_request.
 *
 * The r10_bio structures are linked using a borrowed master_bio pointer.
 * This link is counted in ->remaining.  When the r10_bio that points to NULL
 * has its remaining count decremented to 0, the whole complex operation
 * is complete.
 *
 */
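/*
 * Illustrative sketch (not in the original) of the r10_bio chain built
 * for a recovery touching three out-of-sync devices A, B and C:
 *
 *	r10_bio(C)->master_bio --> r10_bio(B)->master_bio --> r10_bio(A)->master_bio == NULL
 *
 * Each link increments the pointed-to r10_bio's ->remaining, so the
 * operation finishes only when the r10_bio whose master_bio is NULL
 * reaches a remaining count of 0.
 */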
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	conf_t *conf = mddev->private;
	r10bio_t *r10_bio;
	struct bio *biolist = NULL, *bio;
	sector_t max_sector, nr_sectors;
	int i;
	int max_sync;
	sector_t sync_blocks;

	sector_t sectors_skipped = 0;
	int chunks_skipped = 0;

	if (!conf->r10buf_pool)
		if (init_resync(conf))
			return 0;

 skipped:
	max_sector = mddev->dev_sectors;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sector = mddev->resync_max_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunks (there can
		 * be several when recovering multiple devices).
		 * as we may have started syncing it but not finished.
		 * We can find the current address in
		 * mddev->curr_resync, but for recovery,
		 * we need to convert that to several
		 * virtual addresses.
		 */
		if (mddev->curr_resync < max_sector) { /* aborted */
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
						&sync_blocks, 1);
			else for (i=0; i<conf->raid_disks; i++) {
				sector_t sect =
					raid10_find_virt(conf, mddev->curr_resync, i);
				bitmap_end_sync(mddev->bitmap, sect,
						&sync_blocks, 1);
			}
		} else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		*skipped = 1;
		return sectors_skipped;
	}
	if (chunks_skipped >= conf->raid_disks) {
		/* if there has been nothing to do on any drive,
		 * then there is nothing to do at all..
		 */
		*skipped = 1;
		return (max_sector - sector_nr) + sectors_skipped;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */

	/* make sure whole request will fit in a chunk - if chunks
	 * are meaningful
	 */
	if (conf->near_copies < conf->raid_disks &&
	    max_sector > (sector_nr | conf->chunk_mask))
		max_sector = (sector_nr | conf->chunk_mask) + 1;
	/*
	 * If there is non-resync activity waiting for us then
	 * put in a delay to throttle resync.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	/* Again, very different code for resync and recovery.
	 * Both must result in an r10bio with a list of bios that
	 * have bi_end_io, bi_sector, bi_bdev set,
	 * and bi_private set to the r10bio.
	 * For recovery, we may actually create several r10bios
	 * with 2 bios in each, that correspond to the bios in the main one.
	 * In this case, the subordinate r10bios link back through a
	 * borrowed master_bio pointer, and the counter in the master
	 * includes a ref from each subordinate.
	 */
	/* First, we decide what to do and set ->bi_end_io
	 * To end_sync_read if we want to read, and
	 * end_sync_write if we will want to write.
	 */

	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* recovery... the complicated one */
		int j, k;
		r10_bio = NULL;

		for (i=0 ; i<conf->raid_disks; i++)
			if (conf->mirrors[i].rdev &&
			    !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
				int still_degraded = 0;
				/* want to reconstruct this device */
				r10bio_t *rb2 = r10_bio;
				sector_t sect = raid10_find_virt(conf, sector_nr, i);
				int must_sync;
				/* Unless we are doing a full sync, we only need
				 * to recover the block if it is set in the bitmap
				 */
				must_sync = bitmap_start_sync(mddev->bitmap, sect,
							      &sync_blocks, 1);
				if (sync_blocks < max_sync)
					max_sync = sync_blocks;
				if (!must_sync &&
				    !conf->fullsync) {
					/* yep, skip the sync_blocks here, but don't assume
					 * that there will never be anything to do here
					 */
					chunks_skipped = -1;
					continue;
				}

				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
				raise_barrier(conf, rb2 != NULL);
				atomic_set(&r10_bio->remaining, 0);

				r10_bio->master_bio = (struct bio*)rb2;
				if (rb2)
					atomic_inc(&rb2->remaining);
				r10_bio->mddev = mddev;
				set_bit(R10BIO_IsRecover, &r10_bio->state);
				r10_bio->sector = sect;

				raid10_find_phys(conf, r10_bio);

				/* Need to check if the array will still be
				 * degraded
				 */
				for (j=0; j<conf->raid_disks; j++)
					if (conf->mirrors[j].rdev == NULL ||
					    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
						still_degraded = 1;
						break;
					}

				must_sync = bitmap_start_sync(mddev->bitmap, sect,
							      &sync_blocks, still_degraded);

				for (j=0; j<conf->copies;j++) {
					int d = r10_bio->devs[j].devnum;
					if (conf->mirrors[d].rdev &&
					    test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
						/* This is where we read from */
						bio = r10_bio->devs[0].bio;
						bio->bi_next = biolist;
						biolist = bio;
						bio->bi_private = r10_bio;
						bio->bi_end_io = end_sync_read;
						bio->bi_rw = READ;
						bio->bi_sector = r10_bio->devs[j].addr +
							conf->mirrors[d].rdev->data_offset;
						bio->bi_bdev = conf->mirrors[d].rdev->bdev;
						atomic_inc(&conf->mirrors[d].rdev->nr_pending);
						atomic_inc(&r10_bio->remaining);
						/* and we write to 'i' */

						for (k=0; k<conf->copies; k++)
							if (r10_bio->devs[k].devnum == i)
								break;
						BUG_ON(k == conf->copies);
						bio = r10_bio->devs[1].bio;
						bio->bi_next = biolist;
						biolist = bio;
						bio->bi_private = r10_bio;
						bio->bi_end_io = end_sync_write;
						bio->bi_rw = WRITE;
						bio->bi_sector = r10_bio->devs[k].addr +
							conf->mirrors[i].rdev->data_offset;
						bio->bi_bdev = conf->mirrors[i].rdev->bdev;

						r10_bio->devs[0].devnum = d;
						r10_bio->devs[1].devnum = i;

						break;
					}
				}
				if (j == conf->copies) {
					/* Cannot recover, so abort the recovery */
					put_buf(r10_bio);
					if (rb2)
						atomic_dec(&rb2->remaining);
					r10_bio = rb2;
					if (!test_and_set_bit(MD_RECOVERY_INTR,
							      &mddev->recovery))
						printk(KERN_INFO "md/raid10:%s: insufficient "
						       "working devices for recovery.\n",
						       mdname(mddev));
					break;
				}
			}
		if (biolist == NULL) {
			while (r10_bio) {
				r10bio_t *rb2 = r10_bio;
				r10_bio = (r10bio_t*) rb2->master_bio;
				rb2->master_bio = NULL;
				put_buf(rb2);
			}
			goto giveup;
		}
	} else {
		/* resync. Schedule a read for every block at this virt offset */
		int count = 0;

		bitmap_cond_end_sync(mddev->bitmap, sector_nr);

		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
				       &sync_blocks, mddev->degraded) &&
		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			/* We can skip this block */
			*skipped = 1;
			return sync_blocks + sectors_skipped;
		}
		if (sync_blocks < max_sync)
			max_sync = sync_blocks;
		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);

		r10_bio->mddev = mddev;
		atomic_set(&r10_bio->remaining, 0);
		raise_barrier(conf, 0);
		conf->next_resync = sector_nr;

		r10_bio->master_bio = NULL;
		r10_bio->sector = sector_nr;
		set_bit(R10BIO_IsSync, &r10_bio->state);
		raid10_find_phys(conf, r10_bio);
		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;

		for (i=0; i<conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			bio = r10_bio->devs[i].bio;
			bio->bi_end_io = NULL;
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			if (conf->mirrors[d].rdev == NULL ||
			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
				continue;
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			atomic_inc(&r10_bio->remaining);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_read;
			bio->bi_rw = READ;
			bio->bi_sector = r10_bio->devs[i].addr +
				conf->mirrors[d].rdev->data_offset;
			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
			count++;
		}

		if (count < 2) {
			for (i=0; i<conf->copies; i++) {
				int d = r10_bio->devs[i].devnum;
				if (r10_bio->devs[i].bio->bi_end_io)
					rdev_dec_pending(conf->mirrors[d].rdev,
							 mddev);
			}
			put_buf(r10_bio);
			biolist = NULL;
			goto giveup;
		}
	}

	for (bio = biolist; bio ; bio=bio->bi_next) {

		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		if (bio->bi_end_io)
			bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_size = 0;
	}

	nr_sectors = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		for (bio= biolist ; bio ; bio=bio->bi_next) {
			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
			if (bio_add_page(bio, page, len, 0) == 0) {
				/* stop here */
				struct bio *bio2;
				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
					/* remove last page from this bio */
					bio2->bi_vcnt--;
					bio2->bi_size -= len;
					bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
				}
				goto bio_full;
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (biolist->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r10_bio->sectors = nr_sectors;

	while (biolist) {
		bio = biolist;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = bio->bi_private;
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
			generic_make_request(bio);
		}
	}

	if (sectors_skipped)
		/* pretend they weren't skipped, it makes
		 * no important difference in this case
		 */
		md_done_sync(mddev, sectors_skipped, 1);

	return sectors_skipped + nr_sectors;
 giveup:
	/* There is nowhere to write, so all non-sync
	 * drives must be failed, so try the next chunk...
	 */
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;

	sectors_skipped += (max_sector - sector_nr);
	chunks_skipped ++;
	sector_nr = max_sector;
	goto skipped;
}
static sector_t
raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t size;
	conf_t *conf = mddev->private;

	if (!raid_disks)
		raid_disks = conf->raid_disks;
	if (!sectors)
		sectors = conf->dev_sectors;

	size = sectors >> conf->chunk_shift;
	sector_div(size, conf->far_copies);
	size = size * raid_disks;
	sector_div(size, conf->near_copies);

	return size << conf->chunk_shift;
}
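/*
 * Example (illustrative): 4 devices of 1000 chunks each with
 * near_copies=2, far_copies=1 give size = 1000 * 4 / 2 = 2000 chunks
 * of user-visible capacity.
 */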
static conf_t *setup_conf(mddev_t *mddev)
{
	conf_t *conf = NULL;
	int nc, fc, fo;
	sector_t stride, size;
	int err = -EINVAL;

	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid10:%s: chunk size must be "
		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
		       mdname(mddev), PAGE_SIZE);
		goto out;
	}

	nc = mddev->new_layout & 255;
	fc = (mddev->new_layout >> 8) & 255;
	fo = mddev->new_layout & (1<<16);

	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
	    (mddev->new_layout >> 17)) {
		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
		       mdname(mddev), mddev->new_layout);
		goto out;
	}
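	/*
	 * Example (illustrative): layout 0x102 decodes to nc=2, fc=1, fo=0
	 * (the common 'n2' layout); 0x201 is nc=1, fc=2 ('f2'); 0x10201
	 * additionally sets the far_offset bit ('o2').
	 */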
	err = -ENOMEM;
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	if (!conf)
		goto out;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out;


	conf->raid_disks = mddev->raid_disks;
	conf->near_copies = nc;
	conf->far_copies = fc;
	conf->copies = nc*fc;
	conf->far_offset = fo;
	conf->chunk_mask = mddev->new_chunk_sectors - 1;
	conf->chunk_shift = ffz(~mddev->new_chunk_sectors);

	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
					   r10bio_pool_free, conf);
	if (!conf->r10bio_pool)
		goto out;

	size = mddev->dev_sectors >> conf->chunk_shift;
	sector_div(size, fc);
	size = size * conf->raid_disks;
	sector_div(size, nc);
	/* 'size' is now the number of chunks in the array */
	/* calculate "used chunks per device" in 'stride' */
	stride = size * conf->copies;

	/* We need to round up when dividing by raid_disks to
	 * get the stride size.
	 */
	stride += conf->raid_disks - 1;
	sector_div(stride, conf->raid_disks);

	conf->dev_sectors = stride << conf->chunk_shift;

	if (fo)
		stride = 1;
	else
		sector_div(stride, fc);
	conf->stride = stride << conf->chunk_shift;


	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	conf->thread = md_register_thread(raid10d, mddev, NULL);
	if (!conf->thread)
		goto out;

	conf->mddev = mddev;
	return conf;

 out:
	printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
	       mdname(mddev));
	if (conf) {
		if (conf->r10bio_pool)
			mempool_destroy(conf->r10bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf);
	}
	return ERR_PTR(err);
}
static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, disk_idx, chunk_size;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	sector_t size;

	/*
	 * copy the already verified devices into our private RAID10
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */

	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
		if (IS_ERR(conf))
			return PTR_ERR(conf);
		mddev->private = conf;
	}
	conf = mddev->private;
	if (!conf)
		goto out;

	mddev->thread = conf->thread;
	conf->thread = NULL;

	chunk_size = mddev->chunk_sectors << 9;
	blk_queue_io_min(mddev->queue, chunk_size);
	if (conf->raid_disks % conf->near_copies)
		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
	else
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks / conf->near_copies));

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= conf->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit max_segments to 1 lying
		 * within a single page.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		disk->head_position = 0;
	}
	/* need to check that every block has at least one working mirror */
	if (!enough(conf)) {
		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
			if (disk->rdev)
				conf->fullsync = 1;
		}
	}

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid10:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
		"md/raid10:%s: active with %d out of %d devices\n",
		mdname(mddev), conf->raid_disks - mddev->degraded,
		conf->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->dev_sectors = conf->dev_sectors;
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;

	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/* Calculate max read-ahead size.
	 * We need to readahead at least twice a whole stripe....
	 * maybe...
	 */
	{
		int stripe = conf->raid_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		stripe /= conf->near_copies;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	if (conf->near_copies < conf->raid_disks)
		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	md_unregister_thread(mddev->thread);
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}
static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev->private;

	raise_barrier(conf, 0);
	lower_barrier(conf);

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
static void raid10_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev->private;

	switch(state) {
	case 1:
		raise_barrier(conf, 0);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}
static void *raid10_takeover_raid0(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	conf_t *conf;

	if (mddev->degraded > 0) {
		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 10;
	/* new layout: far_copies = 1, near_copies = 2 */
	mddev->new_layout = (1<<8) + 2;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;
	mddev->raid_disks *= 2;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	conf = setup_conf(mddev);
	if (!IS_ERR(conf)) {
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0)
				rdev->new_raid_disk = rdev->raid_disk * 2;
	}

	return conf;
}
static void *raid10_takeover(mddev_t *mddev)
{
	struct raid0_private_data *raid0_priv;

	/* raid10 can take over:
	 *  raid0 - providing it has only two drives
	 */
	if (mddev->level == 0) {
		/* for raid0 takeover only one zone is supported */
		raid0_priv = mddev->private;
		if (raid0_priv->nr_strip_zones > 1) {
			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
			       " with more than one zone.\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		return raid10_takeover_raid0(mddev);
	}
	return ERR_PTR(-EINVAL);
}
static struct mdk_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk= raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.takeover	= raid10_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");