/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
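/*
 * Illustrative sketch (not part of the driver): the batching rule above
 * reduces to comparing the batch number recorded in the stripe against the
 * last batch known to be safely on disk.  Using the fields referenced in
 * __release_stripe() below:
 *
 *	if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 *	    sh->bm_seq - conf->seq_write > 0)
 *		-> queue the stripe on conf->bitmap_list and plug the array;
 *	else
 *		-> move it to conf->handle_list for immediate handling.
 */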
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <asm/atomic.h>

#include <linux/raid/bitmap.h>
#include <linux/async_tx.h>

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH		(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
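/*
 * Worked example (illustrative): with 4K pages on a 64-bit build,
 * STRIPE_SHIFT = 3 (a stripe covers 8 sectors), NR_HASH = 4096 / 8 = 512
 * buckets and HASH_MASK = 0x1ff.  A stripe starting at sector 0x12340
 * therefore hashes to bucket (0x12340 >> 3) & 0x1ff = 0x68.
 */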
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
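/*
 * Illustrative use (a sketch of the pattern used throughout this file,
 * not new driver code):
 *
 *	struct bio *rbi = dev->toread;
 *	while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
 *		... process rbi ...
 *		rbi = r5_next_bio(rbi, dev->sector);
 *	}
 *
 * r5_next_bio() returns NULL as soon as the current bio extends past the
 * end of this stripe+device, which terminates the walk safely.
 */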
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
#endif

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
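/*
 * Example (illustrative): with raid_disks = 5, raid6_next_disk() steps
 * 0 -> 1 -> 2 -> 3 -> 4 -> 0, i.e. it walks the member disks modulo the
 * array width; it is used below to locate the Q disk and the first data
 * disk relative to the parity disk in the RAID-6 layouts.
 */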
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	while (bi) {
		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bi->bi_end_io(bi,
			      test_bit(BIO_UPTODATE, &bi->bi_flags)
				? 0 : -EIO);
		bi = return_bi;
	}
}
static void print_raid5_conf(raid5_conf_t *conf);
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(sh->ops.pending);
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}
static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i = 0; i < num; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}
static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i);
static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete);

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->sector = sector;
	sh->pd_idx = pd_idx;
	sh->state = 0;

	sh->disks = disks;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->disks == disks)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
					     int pd_idx, int noblock)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, disks);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, pd_idx, disks);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
/* test_and_ack_op() ensures that we only dequeue an operation once */
#define test_and_ack_op(op, pend) \
do {						\
	if (test_bit(op, &sh->ops.pending) &&	\
		!test_bit(op, &sh->ops.complete)) {	\
		if (test_and_set_bit(op, &sh->ops.ack)) \
			clear_bit(op, &pend);	\
		else				\
			ack++;			\
	} else					\
		clear_bit(op, &pend);		\
} while (0)
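/*
 * Illustrative note (not driver code): each stripe operation moves through
 * three bitmasks -- ops.pending (requested), ops.ack (handed to
 * raid5_run_ops) and ops.complete (finished).  test_and_ack_op() keeps an
 * op in the local 'pend' mask only when it is pending, not yet complete
 * and not already acked, so e.g. a STRIPE_OP_BIOFILL acked on a previous
 * pass through get_stripe_work() is dropped from 'pend' rather than being
 * resubmitted.
 */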
/* find new work to run, do not resubmit work that is already
 * in flight
 */
static unsigned long get_stripe_work(struct stripe_head *sh)
{
	unsigned long pending;
	int ack = 0;

	pending = sh->ops.pending;

	test_and_ack_op(STRIPE_OP_BIOFILL, pending);
	test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending);
	test_and_ack_op(STRIPE_OP_PREXOR, pending);
	test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
	test_and_ack_op(STRIPE_OP_POSTXOR, pending);
	test_and_ack_op(STRIPE_OP_CHECK, pending);
	if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
		ack++;

	sh->ops.count -= ack;
	if (unlikely(sh->ops.count < 0)) {
		printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx "
			"ops.complete: %#lx\n", pending, sh->ops.pending,
			sh->ops.ack, sh->ops.complete);
		BUG();
	}

	return pending;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	set_bit(STRIPE_IO_STARTED, &sh->state);

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (test_bit(STRIPE_SYNCING, &sh->state) ||
			    test_bit(STRIPE_EXPAND_SOURCE, &sh->state) ||
			    test_bit(STRIPE_EXPAND_READY, &sh->state))
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
					b_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
			else
				tx = async_memcpy(bio_page, page, b_offset,
					page_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			/* The access to dev->read is outside of the
			 * spin_lock_irq(&conf->device_lock), but is protected
			 * by the STRIPE_OP_BIOFILL pending bit
			 */
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (--rbi->bi_phys_segments == 0) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}
	}
	set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_biofill, sh);
}
static void ops_complete_compute5(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
	set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, unsigned long pending)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			0, NULL, ops_complete_compute5, sh);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			ASYNC_TX_XOR_ZERO_DST, NULL,
			ops_complete_compute5, sh);

	/* ack now if postxor is not set to be run */
	if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending))
		async_tx_ack(tx);

	return tx;
}
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
}
static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int count = 0, pd_idx = sh->pd_idx, i;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
		ops_complete_prexor, sh);

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
		 unsigned long pending)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx, i;

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (Wantprexor)
	 */
	int prexor = test_bit(STRIPE_OP_PREXOR, &pending);

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;
		int towrite;

		towrite = 0;
		if (prexor) { /* rmw */
			if (dev->towrite &&
			    test_bit(R5_Wantprexor, &dev->flags))
				towrite = 1;
		} else { /* rcw */
			if (i != pd_idx && dev->towrite &&
			    test_bit(R5_LOCKED, &dev->flags))
				towrite = 1;
		}

		if (towrite) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_postxor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_complete_write(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks, i, pd_idx = sh->pd_idx;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (dev->written || i == pd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
	set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
	unsigned long pending)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
	unsigned long flags;
	dma_async_tx_callback callback;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (prexor) {
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* check whether this postxor is part of a write */
	callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
		ops_complete_write : ops_complete_postxor;

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	if (unlikely(count == 1)) {
		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			flags, tx, callback, sh);
	} else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			flags, tx, callback, sh);
}
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int pd_idx = sh->pd_idx;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
	    sh->ops.zero_sum_result == 0)
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);

	set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_check(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	struct dma_async_tx_descriptor *tx;

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (i != pd_idx)
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);

	if (tx)
		set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
	else
		clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);

	atomic_inc(&sh->count);
	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_check, sh);
}
static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;

	if (test_bit(STRIPE_OP_BIOFILL, &pending)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending))
		tx = ops_run_compute5(sh, pending);

	if (test_bit(STRIPE_OP_PREXOR, &pending))
		tx = ops_run_prexor(sh, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
		tx = ops_run_biodrain(sh, tx, pending);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_POSTXOR, &pending))
		ops_run_postxor(sh, tx, pending);

	if (test_bit(STRIPE_OP_CHECK, &pending))
		ops_run_check(sh);

	if (test_bit(STRIPE_OP_IO, &pending))
		ops_run_io(sh);

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
}
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
	sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err = 0;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	md_allow_write(conf->mddev);

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
#endif
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}
static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "raid5:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded)
			printk_rl(KERN_WARNING
				  "raid5:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_rl(KERN_WARNING
				  "raid5:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i);
static void raid5_build_block(struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	pr_debug("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "raid5: Disk failure on %s, disabling device.\n"
		       "raid5: Operation continuing on %d devices.\n",
		       bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
			unsigned int data_disks, unsigned int * dd_idx,
			unsigned int * pd_idx, raid5_conf_t *conf)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	switch(conf->level) {
	case 4:
		*pd_idx = data_disks;
		break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				conf->algorithm);
		}
		break;
	case 6:
		/**** FIX THIS ****/
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
			if (*pd_idx == raid_disks-1)
				(*dd_idx)++;	/* Q D D D P */
			else if (*dd_idx >= *pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*pd_idx == raid_disks-1)
				(*dd_idx)++;	/* Q D D D P */
			else if (*dd_idx >= *pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		default:
			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
				conf->algorithm);
		}
		break;
	}

	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
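/*
 * Worked example (illustrative): a 4-disk RAID-5, 64K chunks
 * (sectors_per_chunk = 128), ALGORITHM_LEFT_SYMMETRIC, r_sector = 300.
 * Then chunk_offset = 44, chunk_number = 2, data_disks = 3, stripe = 0,
 * *dd_idx = 2; the parity selection gives *pd_idx = 3 - 0 % 4 = 3 and
 * *dd_idx = (3 + 1 + 2) % 4 = 2, and new_sector = 0 * 128 + 44 = 44:
 * the data lives on disk 2 at device sector 44, parity on disk 3.
 */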
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	sector_t r_sector;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       conf->algorithm);
		}
		break;
	case 6:
		if (i == raid6_next_disk(sh->pd_idx, raid_disks))
			return 0; /* It is the Q disk */
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		default:
			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
				conf->algorithm);
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
/*
 * Copy data between a page in the stripe cache, and one or more bion
 * The page could align with the middle of the bio, or there could be
 * several bion, each with several bio_vecs, which cover part of the page
 * Multiple bion are linked together on bi_next.  There may be extras
 * at the end of this list.  We ignore them.
 */
static void copy_data(int frombio, struct bio *bio,
		     struct page *page,
		     sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}
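/*
 * Illustrative example for the offset arithmetic above (not driver code):
 * if the stripe page covers device sectors 8..15 (sector == 8) and a bio
 * starts at bi_sector == 6, page_offset starts at (8 - 6) * -512 = -1024,
 * so the first 1024 bytes of the bio are skipped (b_offset = 1024) and
 * copying begins at offset 0 of the stripe page.
 */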
#define check_xor()	do {						  \
				if (count == MAX_XOR_BLOCKS) {		  \
					xor_blocks(count, STRIPE_SIZE, dest, ptr);\
					count = 0;			  \
				}					  \
			} while(0)
static void compute_parity6(struct stripe_head *sh, int method)
{
	raid6_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
	struct bio *chosen;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[disks];

	qd_idx = raid6_next_disk(pd_idx, disks);
	d0_idx = raid6_next_disk(qd_idx, disks);

	pr_debug("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	switch(method) {
	case READ_MODIFY_WRITE:
		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
	case RECONSTRUCT_WRITE:
		for (i = disks; i-- ;)
			if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				BUG_ON(sh->dev[i].written);
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		BUG();		/* Not implemented yet */
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

//	switch(method) {
//	case RECONSTRUCT_WRITE:
//	case CHECK_PARITY:
//	case UPDATE_PARITY:
		/* Note that unlike RAID-5, the ordering of the disks matters greatly. */
		/* FIX: Is this ordering of drives even remotely optimal? */
		count = 0;
		i = d0_idx;
		do {
			ptrs[count++] = page_address(sh->dev[i].page);
			if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
				printk("block %d/%d not uptodate on parity calc\n", i,count);
			i = raid6_next_disk(i, disks);
		} while ( i != d0_idx );
//		break;
//	}

	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);

	switch(method) {
	case RECONSTRUCT_WRITE:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
		break;
	case UPDATE_PARITY:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		break;
	}
}
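/*
 * Illustrative note (not driver code): gen_syndrome() expects the data
 * pages in data-disk order followed by P and then Q, which is why the
 * pointer loop above starts at d0_idx (the disk after Q) and walks
 * raid6_next_disk() until it wraps.  E.g. with 6 disks and pd_idx = 5,
 * qd_idx = 0 and d0_idx = 1, so ptrs[] is filled as D1 D2 D3 D4 P Q.
 */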
/* Compute one missing block */
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
	int i, count, disks = sh->disks;
	void *ptr[MAX_XOR_BLOCKS], *dest, *p;
	int pd_idx = sh->pd_idx;
	int qd_idx = raid6_next_disk(pd_idx, disks);

	pr_debug("compute_block_1, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	if ( dd_idx == qd_idx ) {
		/* We're actually computing the Q drive */
		compute_parity6(sh, UPDATE_PARITY);
	} else {
		dest = page_address(sh->dev[dd_idx].page);
		if (!nozero) memset(dest, 0, STRIPE_SIZE);
		count = 0;
		for (i = disks ; i--; ) {
			if (i == dd_idx || i == qd_idx)
				continue;
			p = page_address(sh->dev[i].page);
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
				ptr[count++] = p;
			else
				printk("compute_block() %d, stripe %llu, %d"
					" not present\n", dd_idx,
					(unsigned long long)sh->sector, i);

			check_xor();
		}
		if (count)
			xor_blocks(count, STRIPE_SIZE, dest, ptr);
		if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
		else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
	}
}
/* Compute two missing blocks */
static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
{
	int i, count, disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = raid6_next_disk(pd_idx, disks);
	int d0_idx = raid6_next_disk(qd_idx, disks);
	int faila, failb;

	/* faila and failb are disk numbers relative to d0_idx */
	/* pd_idx become disks-2 and qd_idx become disks-1 */
	faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
	failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;

	BUG_ON(faila == failb);
	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }

	pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
		 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);

	if ( failb == disks-1 ) {
		/* Q disk is one of the missing disks */
		if ( faila == disks-2 ) {
			/* Missing P+Q, just recompute */
			compute_parity6(sh, UPDATE_PARITY);
			return;
		} else {
			/* We're missing D+Q; recompute D from P */
			compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
			return;
		}
	}

	/* We're missing D+P or D+D; build pointer table */
	{
		/**** FIX THIS: This could be very bad if disks is close to 256 ****/
		void *ptrs[disks];

		count = 0;
		i = d0_idx;
		do {
			ptrs[count++] = page_address(sh->dev[i].page);
			i = raid6_next_disk(i, disks);
			if (i != dd_idx1 && i != dd_idx2 &&
			    !test_bit(R5_UPTODATE, &sh->dev[i].flags))
				printk("compute_2 with missing block %d/%d\n", count, i);
		} while ( i != d0_idx );

		if ( failb == disks-2 ) {
			/* We're missing D+P. */
			raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
		} else {
			/* We're missing D+D. */
			raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
		}

		/* Both the above update both missing blocks */
		set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
		set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
	}
}
static int
handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	int locked = 0;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
			sh->ops.count++;
		}

		set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
		sh->ops.count++;

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				locked++;
			}
		}
		if (locked + 1 == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&sh->raid_conf->pending_full_writes);
	} else {
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		set_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
		set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
		set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);

		sh->ops.count += 3;

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			/* For a read-modify write there may be blocks that are
			 * locked for reading while others are ready to be
			 * written so we distinguish these blocks by the
			 * R5_Wantprexor bit
			 */
			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			    test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantprexor, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				locked++;
			}
		}
	}

	/* keep the parity disk locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	locked++;

	pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
		__func__, (unsigned long long)sh->sector,
		locked, sh->ops.pending);

	return locked;
}
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
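/*
 * Illustrative example for the R5_OVERWRITE check above (not driver code):
 * with STRIPE_SECTORS = 8 and dev->sector = 100, two queued write bios
 * covering sectors 100-103 and 104-107 advance 'sector' to 108 >= 108, so
 * the whole page will be overwritten and R5_OVERWRITE is set; a single bio
 * covering 100-105 only reaches 106 and the flag stays clear.
 */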
static void end_reshape(raid5_conf_t *conf);

static int page_is_zero(struct page *p)
{
	char *a = page_address(p);
	return ((*(u32*)a) == 0 &&
		memcmp(a, a+4, STRIPE_SIZE-4)==0);
}
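/*
 * Note (illustrative): page_is_zero() avoids scanning against a separate
 * zero buffer; if the first 32-bit word is zero and the page compares
 * equal to itself shifted by four bytes, then every byte must be zero.
 */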
static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
{
	int sectors_per_chunk = conf->chunk_size >> 9;
	int pd_idx, dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);

	raid5_compute_sector(stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     disks, disks - conf->max_degraded,
			     &dd_idx, &pd_idx, conf);
	return pd_idx;
}
static void
handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;

	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (--bi->bi_phys_segments == 0) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (--bi->bi_phys_segments == 0) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
 * to process
 */
static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
			struct stripe_head_state *s, int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* don't schedule compute operations or reads on the parity block while
	 * a check is in flight
	 */
	if ((disk_idx == sh->pd_idx) &&
	     test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
		return ~0;

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
	    (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding || (s->failed &&
	     (failed_dev->toread || (failed_dev->towrite &&
	     !test_bit(R5_OVERWRITE, &failed_dev->flags)
	     ))))) {
		/* 1/ We would like to get this block, possibly by computing it,
		 * but we might not be able to.
		 *
		 * 2/ Since parity check operations potentially make the parity
		 * block !uptodate it will need to be refreshed before any
		 * compute operations on data disks are scheduled.
		 *
		 * 3/ We hold off parity block re-reads until check operations
		 * have quiesced.
		 */
		if ((s->uptodate == disks - 1) &&
		    !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
			set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			s->req_compute = 1;
			sh->ops.count++;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid5_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 0; /* uptodate + compute == disks */
		} else if ((s->uptodate < disks - 1) &&
			test_bit(R5_Insync, &dev->flags)) {
			/* Note: we hold off compute operations while checks are
			 * in flight, but we still prefer 'compute' over 'read'
			 * hence we only read if (uptodate < * disks-1)
			 */
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
				sh->ops.count++;
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				s->syncing);
		}
	}

	return ~0;
}
*sh
,
2039 struct stripe_head_state
*s
, int disks
)
2043 /* Clear completed compute operations. Parity recovery
2044 * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled
2045 * later on in this routine
2047 if (test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.complete
) &&
2048 !test_bit(STRIPE_OP_MOD_REPAIR_PD
, &sh
->ops
.pending
)) {
2049 clear_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.complete
);
2050 clear_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.ack
);
2051 clear_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
);
2054 /* look for blocks to read/compute, skip this if a compute
2055 * is already in flight, or if the stripe contents are in the
2056 * midst of changing due to a write
2058 if (!test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
) &&
2059 !test_bit(STRIPE_OP_PREXOR
, &sh
->ops
.pending
) &&
2060 !test_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.pending
)) {
2061 for (i
= disks
; i
--; )
2062 if (__handle_issuing_new_read_requests5(
2063 sh
, s
, i
, disks
) == 0)
2066 set_bit(STRIPE_HANDLE
, &sh
->state
);
static void handle_issuing_new_read_requests6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
			int disks)
{
	int i;
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (!test_bit(R5_LOCKED, &dev->flags) &&
		    !test_bit(R5_UPTODATE, &dev->flags) &&
		    (dev->toread || (dev->towrite &&
		     !test_bit(R5_OVERWRITE, &dev->flags)) ||
		     s->syncing || s->expanding ||
		     (s->failed >= 1 &&
		      (sh->dev[r6s->failed_num[0]].toread ||
		       s->to_write)) ||
		     (s->failed >= 2 &&
		      (sh->dev[r6s->failed_num[1]].toread ||
		       s->to_write)))) {
			/* we would like to get this block, possibly
			 * by computing it, but we might not be able to
			 */
			if (s->uptodate == disks-1) {
				pr_debug("Computing stripe %llu block %d\n",
				       (unsigned long long)sh->sector, i);
				compute_block_1(sh, i, 0);
				s->uptodate++;
			} else if ( s->uptodate == disks-2 && s->failed >= 2 ) {
				/* Computing 2-failure is *very* expensive; only
				 * do it if failed >= 2
				 */
				int other;
				for (other = disks; other--; ) {
					if (other == i)
						continue;
					if (!test_bit(R5_UPTODATE,
					      &sh->dev[other].flags))
						break;
				}
				BUG_ON(other < 0);
				pr_debug("Computing stripe %llu blocks %d,%d\n",
				       (unsigned long long)sh->sector,
				       i, other);
				compute_block_2(sh, i, other);
				s->uptodate += 2;
			} else if (test_bit(R5_Insync, &dev->flags)) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				s->locked++;
				pr_debug("Reading block %d (sync=%d)\n",
					i, s->syncing);
			}
		}
	}
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_completed_write_requests
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_completed_write_requests(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;

				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (--wbi->bi_phys_segments == 0) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
							sh->sector,
							STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
							0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s, int disks)
{
	int rmw = 0, rcw = 0, i;

	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					if (!test_and_set_bit(
						STRIPE_OP_IO, &sh->ops.pending))
						sh->ops.count++;
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					if (!test_and_set_bit(
						STRIPE_OP_IO, &sh->ops.pending))
						sh->ops.count++;
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid5_run_ops only
	 * handles the case where compute block and postxor are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute ||
	    !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		s->locked += handle_write_operations5(sh, rcw == 0, 0);
}
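/* For illustration only: the rmw/rcw totals computed in
 * handle_issuing_new_write_requests5() above are simply counts of how many
 * old blocks each strategy would have to read, with an unreadable block
 * priced at 2*disks so that strategy can never win.  A stand-alone sketch of
 * that costing follows; the struct and field names are hypothetical userspace
 * stand-ins, not the kernel's, and the block is kept under #if 0 so it is
 * never built as part of this driver.
 */
#if 0
#include <stdio.h>

/* hypothetical per-device view: just enough to price the two strategies */
struct dev_sketch {
	int towrite;	/* a write to this block is pending */
	int overwrite;	/* that write covers the whole block */
	int uptodate;	/* old data already in the stripe cache */
	int insync;	/* block is readable from its disk */
};

/* count the reads needed for read-modify-write vs reconstruct-write,
 * mirroring the counting loop at the top of the handler above
 */
static void price_write(const struct dev_sketch *dev, int disks, int pd_idx)
{
	int rmw = 0, rcw = 0, i;

	for (i = 0; i < disks; i++) {
		/* rmw reads the old copy of each rewritten block plus parity */
		if ((dev[i].towrite || i == pd_idx) && !dev[i].uptodate)
			rmw += dev[i].insync ? 1 : 2 * disks;
		/* rcw reads every data block that is not fully overwritten */
		if (!dev[i].overwrite && i != pd_idx && !dev[i].uptodate)
			rcw += dev[i].insync ? 1 : 2 * disks;
	}
	printf("rmw=%d rcw=%d -> %s\n", rmw, rcw,
	       (rmw < rcw && rmw > 0) ? "read-modify-write" : "reconstruct-write");
}

int main(void)
{
	/* five-disk stripe, parity on disk 4, one partial write to disk 0 */
	struct dev_sketch dev[5] = { { .towrite = 1, .insync = 1 } };
	int i;

	for (i = 1; i < 5; i++)
		dev[i].insync = 1;
	price_write(dev, 5, 4);	/* prints "rmw=2 rcw=4 -> read-modify-write" */
	return 0;
}
#endif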
static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
{
	int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = r6s->qd_idx;

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags)
		    && i != pd_idx && i != qd_idx
		    && !test_bit(R5_LOCKED, &dev->flags) &&
		    !test_bit(R5_UPTODATE, &dev->flags)) {
			if (test_bit(R5_Insync, &dev->flags))
				rcw++;
			else {
				pr_debug("raid6: must_compute: "
					"disk %d flags=%#lx\n", i, dev->flags);
				must_compute++;
			}
		}
	}
	pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
		(unsigned long long)sh->sector, rcw, must_compute);
	set_bit(STRIPE_HANDLE, &sh->state);

	if (rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags)
			    && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
			    && !test_bit(R5_LOCKED, &dev->flags) &&
			    !test_bit(R5_UPTODATE, &dev->flags) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old stripe %llu "
						"block %d for Reconstruct\n",
					     (unsigned long long)sh->sector, i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					pr_debug("Request delayed stripe %llu "
						"block %d for Reconstruct\n",
					     (unsigned long long)sh->sector, i);
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data, we can start a
	 * write request
	 */
	if (s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		if (must_compute > 0) {
			/* We have failed blocks and need to compute them */
			switch (s->failed) {
			case 1:
				compute_block_1(sh, r6s->failed_num[0], 0);
				break;
			case 2:
				compute_block_2(sh, r6s->failed_num[0],
						r6s->failed_num[1]);
				break;
			default: /* This request should have been failed? */
				BUG();
			}
		}

		pr_debug("Computing parity for stripe %llu\n",
			(unsigned long long)sh->sector);
		compute_parity6(sh, RECONSTRUCT_WRITE);
		/* now every locked buffer is ready to be written */
		for (i = disks; i--; )
			if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
				pr_debug("Writing stripe %llu block %d\n",
					(unsigned long long)sh->sector, i);
				s->locked++;
				set_bit(R5_Wantwrite, &sh->dev[i].flags);
			}
		if (s->locked == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
		/* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
		set_bit(STRIPE_INSYNC, &sh->state);

		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			atomic_dec(&conf->preread_active_stripes);
			if (atomic_read(&conf->preread_active_stripes) <
			    IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		}
	}
}
2373 static void handle_parity_checks5(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2374 struct stripe_head_state
*s
, int disks
)
2376 int canceled_check
= 0;
2378 set_bit(STRIPE_HANDLE
, &sh
->state
);
2380 /* complete a check operation */
2381 if (test_and_clear_bit(STRIPE_OP_CHECK
, &sh
->ops
.complete
)) {
2382 clear_bit(STRIPE_OP_CHECK
, &sh
->ops
.ack
);
2383 clear_bit(STRIPE_OP_CHECK
, &sh
->ops
.pending
);
2384 if (s
->failed
== 0) {
2385 if (sh
->ops
.zero_sum_result
== 0)
2386 /* parity is correct (on disc,
2387 * not in buffer any more)
2389 set_bit(STRIPE_INSYNC
, &sh
->state
);
2391 conf
->mddev
->resync_mismatches
+=
2394 MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
2395 /* don't try to repair!! */
2396 set_bit(STRIPE_INSYNC
, &sh
->state
);
2398 set_bit(STRIPE_OP_COMPUTE_BLK
,
2400 set_bit(STRIPE_OP_MOD_REPAIR_PD
,
2402 set_bit(R5_Wantcompute
,
2403 &sh
->dev
[sh
->pd_idx
].flags
);
2404 sh
->ops
.target
= sh
->pd_idx
;
2410 canceled_check
= 1; /* STRIPE_INSYNC is not set */
2413 /* start a new check operation if there are no failures, the stripe is
2414 * not insync, and a repair is not in flight
2416 if (s
->failed
== 0 &&
2417 !test_bit(STRIPE_INSYNC
, &sh
->state
) &&
2418 !test_bit(STRIPE_OP_MOD_REPAIR_PD
, &sh
->ops
.pending
)) {
2419 if (!test_and_set_bit(STRIPE_OP_CHECK
, &sh
->ops
.pending
)) {
2420 BUG_ON(s
->uptodate
!= disks
);
2421 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
);
2427 /* check if we can clear a parity disk reconstruct */
2428 if (test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.complete
) &&
2429 test_bit(STRIPE_OP_MOD_REPAIR_PD
, &sh
->ops
.pending
)) {
2431 clear_bit(STRIPE_OP_MOD_REPAIR_PD
, &sh
->ops
.pending
);
2432 clear_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.complete
);
2433 clear_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.ack
);
2434 clear_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
);
2438 /* Wait for check parity and compute block operations to complete
2439 * before write-back. If a failure occurred while the check operation
2440 * was in flight we need to cycle this stripe through handle_stripe
2441 * since the parity block may not be uptodate
2443 if (!canceled_check
&& !test_bit(STRIPE_INSYNC
, &sh
->state
) &&
2444 !test_bit(STRIPE_OP_CHECK
, &sh
->ops
.pending
) &&
2445 !test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
)) {
2447 /* either failed parity check, or recovery is happening */
2449 s
->failed_num
= sh
->pd_idx
;
2450 dev
= &sh
->dev
[s
->failed_num
];
2451 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
2452 BUG_ON(s
->uptodate
!= disks
);
2454 set_bit(R5_LOCKED
, &dev
->flags
);
2455 set_bit(R5_Wantwrite
, &dev
->flags
);
2456 if (!test_and_set_bit(STRIPE_OP_IO
, &sh
->ops
.pending
))
2459 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
2461 set_bit(STRIPE_INSYNC
, &sh
->state
);
2466 static void handle_parity_checks6(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2467 struct stripe_head_state
*s
,
2468 struct r6_state
*r6s
, struct page
*tmp_page
,
2471 int update_p
= 0, update_q
= 0;
2473 int pd_idx
= sh
->pd_idx
;
2474 int qd_idx
= r6s
->qd_idx
;
2476 set_bit(STRIPE_HANDLE
, &sh
->state
);
2478 BUG_ON(s
->failed
> 2);
2479 BUG_ON(s
->uptodate
< disks
);
2480 /* Want to check and possibly repair P and Q.
2481 * However there could be one 'failed' device, in which
2482 * case we can only check one of them, possibly using the
2483 * other to generate missing data
2486 /* If !tmp_page, we cannot do the calculations,
2487 * but as we have set STRIPE_HANDLE, we will soon be called
2488 * by stripe_handle with a tmp_page - just wait until then.
2491 if (s
->failed
== r6s
->q_failed
) {
2492 /* The only possible failed device holds 'Q', so it
2493 * makes sense to check P (If anything else were failed,
2494 * we would have used P to recreate it).
2496 compute_block_1(sh
, pd_idx
, 1);
2497 if (!page_is_zero(sh
->dev
[pd_idx
].page
)) {
2498 compute_block_1(sh
, pd_idx
, 0);
2502 if (!r6s
->q_failed
&& s
->failed
< 2) {
2503 /* q is not failed, and we didn't use it to generate
2504 * anything, so it makes sense to check it
2506 memcpy(page_address(tmp_page
),
2507 page_address(sh
->dev
[qd_idx
].page
),
2509 compute_parity6(sh
, UPDATE_PARITY
);
2510 if (memcmp(page_address(tmp_page
),
2511 page_address(sh
->dev
[qd_idx
].page
),
2512 STRIPE_SIZE
) != 0) {
2513 clear_bit(STRIPE_INSYNC
, &sh
->state
);
2517 if (update_p
|| update_q
) {
2518 conf
->mddev
->resync_mismatches
+= STRIPE_SECTORS
;
2519 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
2520 /* don't try to repair!! */
2521 update_p
= update_q
= 0;
2524 /* now write out any block on a failed drive,
2525 * or P or Q if they need it
2528 if (s
->failed
== 2) {
2529 dev
= &sh
->dev
[r6s
->failed_num
[1]];
2531 set_bit(R5_LOCKED
, &dev
->flags
);
2532 set_bit(R5_Wantwrite
, &dev
->flags
);
2534 if (s
->failed
>= 1) {
2535 dev
= &sh
->dev
[r6s
->failed_num
[0]];
2537 set_bit(R5_LOCKED
, &dev
->flags
);
2538 set_bit(R5_Wantwrite
, &dev
->flags
);
2542 dev
= &sh
->dev
[pd_idx
];
2544 set_bit(R5_LOCKED
, &dev
->flags
);
2545 set_bit(R5_Wantwrite
, &dev
->flags
);
2548 dev
= &sh
->dev
[qd_idx
];
2550 set_bit(R5_LOCKED
, &dev
->flags
);
2551 set_bit(R5_Wantwrite
, &dev
->flags
);
2553 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
2555 set_bit(STRIPE_INSYNC
, &sh
->state
);
2559 static void handle_stripe_expansion(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2560 struct r6_state
*r6s
)
2564 /* We have read all the blocks in this stripe and now we need to
2565 * copy some of them into a target stripe for expand.
2567 struct dma_async_tx_descriptor
*tx
= NULL
;
2568 clear_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2569 for (i
= 0; i
< sh
->disks
; i
++)
2570 if (i
!= sh
->pd_idx
&& (!r6s
|| i
!= r6s
->qd_idx
)) {
2571 int dd_idx
, pd_idx
, j
;
2572 struct stripe_head
*sh2
;
2574 sector_t bn
= compute_blocknr(sh
, i
);
2575 sector_t s
= raid5_compute_sector(bn
, conf
->raid_disks
,
2577 conf
->max_degraded
, &dd_idx
,
2579 sh2
= get_active_stripe(conf
, s
, conf
->raid_disks
,
2582 /* so far only the early blocks of this stripe
2583 * have been requested. When later blocks
2584 * get requested, we will try again
2587 if (!test_bit(STRIPE_EXPANDING
, &sh2
->state
) ||
2588 test_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
)) {
2589 /* must have already done this block */
2590 release_stripe(sh2
);
2594 /* place all the copies on one channel */
2595 tx
= async_memcpy(sh2
->dev
[dd_idx
].page
,
2596 sh
->dev
[i
].page
, 0, 0, STRIPE_SIZE
,
2597 ASYNC_TX_DEP_ACK
, tx
, NULL
, NULL
);
2599 set_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
);
2600 set_bit(R5_UPTODATE
, &sh2
->dev
[dd_idx
].flags
);
2601 for (j
= 0; j
< conf
->raid_disks
; j
++)
2602 if (j
!= sh2
->pd_idx
&&
2603 (!r6s
|| j
!= raid6_next_disk(sh2
->pd_idx
,
2605 !test_bit(R5_Expanded
, &sh2
->dev
[j
].flags
))
2607 if (j
== conf
->raid_disks
) {
2608 set_bit(STRIPE_EXPAND_READY
, &sh2
->state
);
2609 set_bit(STRIPE_HANDLE
, &sh2
->state
);
2611 release_stripe(sh2
);
2614 /* done submitting copies, wait for them to complete */
2617 dma_wait_for_async_tx(tx
);
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */
2639 static void handle_stripe5(struct stripe_head
*sh
)
2641 raid5_conf_t
*conf
= sh
->raid_conf
;
2642 int disks
= sh
->disks
, i
;
2643 struct bio
*return_bi
= NULL
;
2644 struct stripe_head_state s
;
2646 unsigned long pending
= 0;
2647 mdk_rdev_t
*blocked_rdev
= NULL
;
2649 memset(&s
, 0, sizeof(s
));
2650 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
2651 "ops=%lx:%lx:%lx\n", (unsigned long long)sh
->sector
, sh
->state
,
2652 atomic_read(&sh
->count
), sh
->pd_idx
,
2653 sh
->ops
.pending
, sh
->ops
.ack
, sh
->ops
.complete
);
2655 spin_lock(&sh
->lock
);
2656 clear_bit(STRIPE_HANDLE
, &sh
->state
);
2657 clear_bit(STRIPE_DELAYED
, &sh
->state
);
2659 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
2660 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2661 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
2662 /* Now to look around and see what can be done */
2664 /* clean-up completed biofill operations */
2665 if (test_bit(STRIPE_OP_BIOFILL
, &sh
->ops
.complete
)) {
2666 clear_bit(STRIPE_OP_BIOFILL
, &sh
->ops
.pending
);
2667 clear_bit(STRIPE_OP_BIOFILL
, &sh
->ops
.ack
);
2668 clear_bit(STRIPE_OP_BIOFILL
, &sh
->ops
.complete
);
2672 for (i
=disks
; i
--; ) {
2674 struct r5dev
*dev
= &sh
->dev
[i
];
2675 clear_bit(R5_Insync
, &dev
->flags
);
2677 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2678 "written %p\n", i
, dev
->flags
, dev
->toread
, dev
->read
,
2679 dev
->towrite
, dev
->written
);
2681 /* maybe we can request a biofill operation
2683 * new wantfill requests are only permitted while
2684 * STRIPE_OP_BIOFILL is clear
2686 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
2687 !test_bit(STRIPE_OP_BIOFILL
, &sh
->ops
.pending
))
2688 set_bit(R5_Wantfill
, &dev
->flags
);
2690 /* now count some things */
2691 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
2692 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
2693 if (test_bit(R5_Wantcompute
, &dev
->flags
)) s
.compute
++;
2695 if (test_bit(R5_Wantfill
, &dev
->flags
))
2697 else if (dev
->toread
)
2701 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
2706 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
2707 if (rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
2708 blocked_rdev
= rdev
;
2709 atomic_inc(&rdev
->nr_pending
);
2712 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
2713 /* The ReadError flag will just be confusing now */
2714 clear_bit(R5_ReadError
, &dev
->flags
);
2715 clear_bit(R5_ReWrite
, &dev
->flags
);
2717 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
2718 || test_bit(R5_ReadError
, &dev
->flags
)) {
2722 set_bit(R5_Insync
, &dev
->flags
);
2726 if (unlikely(blocked_rdev
)) {
2727 set_bit(STRIPE_HANDLE
, &sh
->state
);
2731 if (s
.to_fill
&& !test_and_set_bit(STRIPE_OP_BIOFILL
, &sh
->ops
.pending
))
2734 pr_debug("locked=%d uptodate=%d to_read=%d"
2735 " to_write=%d failed=%d failed_num=%d\n",
2736 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
,
2737 s
.failed
, s
.failed_num
);
2738 /* check if the array has lost two devices and, if so, some requests might
2741 if (s
.failed
> 1 && s
.to_read
+s
.to_write
+s
.written
)
2742 handle_requests_to_failed_array(conf
, sh
, &s
, disks
,
2744 if (s
.failed
> 1 && s
.syncing
) {
2745 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
2746 clear_bit(STRIPE_SYNCING
, &sh
->state
);
2750 /* might be able to return some write requests if the parity block
2751 * is safe, or on a failed drive
2753 dev
= &sh
->dev
[sh
->pd_idx
];
2755 ((test_bit(R5_Insync
, &dev
->flags
) &&
2756 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2757 test_bit(R5_UPTODATE
, &dev
->flags
)) ||
2758 (s
.failed
== 1 && s
.failed_num
== sh
->pd_idx
)))
2759 handle_completed_write_requests(conf
, sh
, disks
, &return_bi
);
2761 /* Now we might consider reading some blocks, either to check/generate
2762 * parity, or to satisfy requests
2763 * or to load a block that is being partially written.
2765 if (s
.to_read
|| s
.non_overwrite
||
2766 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
||
2767 test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
))
2768 handle_issuing_new_read_requests5(sh
, &s
, disks
);
2770 /* Now we check to see if any write operations have recently
2774 /* leave prexor set until postxor is done, allows us to distinguish
2775 * a rmw from a rcw during biodrain
2777 if (test_bit(STRIPE_OP_PREXOR
, &sh
->ops
.complete
) &&
2778 test_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.complete
)) {
2780 clear_bit(STRIPE_OP_PREXOR
, &sh
->ops
.complete
);
2781 clear_bit(STRIPE_OP_PREXOR
, &sh
->ops
.ack
);
2782 clear_bit(STRIPE_OP_PREXOR
, &sh
->ops
.pending
);
2784 for (i
= disks
; i
--; )
2785 clear_bit(R5_Wantprexor
, &sh
->dev
[i
].flags
);
2788 /* if only POSTXOR is set then this is an 'expand' postxor */
2789 if (test_bit(STRIPE_OP_BIODRAIN
, &sh
->ops
.complete
) &&
2790 test_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.complete
)) {
2792 clear_bit(STRIPE_OP_BIODRAIN
, &sh
->ops
.complete
);
2793 clear_bit(STRIPE_OP_BIODRAIN
, &sh
->ops
.ack
);
2794 clear_bit(STRIPE_OP_BIODRAIN
, &sh
->ops
.pending
);
2796 clear_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.complete
);
2797 clear_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.ack
);
2798 clear_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.pending
);
2800 /* All the 'written' buffers and the parity block are ready to
2801 * be written back to disk
2803 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
2804 for (i
= disks
; i
--; ) {
2806 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
2807 (i
== sh
->pd_idx
|| dev
->written
)) {
2808 pr_debug("Writing block %d\n", i
);
2809 set_bit(R5_Wantwrite
, &dev
->flags
);
2810 if (!test_and_set_bit(
2811 STRIPE_OP_IO
, &sh
->ops
.pending
))
2813 if (!test_bit(R5_Insync
, &dev
->flags
) ||
2814 (i
== sh
->pd_idx
&& s
.failed
== 0))
2815 set_bit(STRIPE_INSYNC
, &sh
->state
);
2818 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2819 atomic_dec(&conf
->preread_active_stripes
);
2820 if (atomic_read(&conf
->preread_active_stripes
) <
2822 md_wakeup_thread(conf
->mddev
->thread
);
2826 /* Now to consider new write requests and what else, if anything
2827 * should be read. We do not handle new writes when:
2828 * 1/ A 'write' operation (copy+xor) is already in flight.
2829 * 2/ A 'check' operation is in flight, as it may clobber the parity
2832 if (s
.to_write
&& !test_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.pending
) &&
2833 !test_bit(STRIPE_OP_CHECK
, &sh
->ops
.pending
))
2834 handle_issuing_new_write_requests5(conf
, sh
, &s
, disks
);
2836 /* maybe we need to check and possibly fix the parity for this stripe
2837 * Any reads will already have been scheduled, so we just see if enough
2838 * data is available. The parity check is held off while parity
2839 * dependent operations are in flight.
2841 if ((s
.syncing
&& s
.locked
== 0 &&
2842 !test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
) &&
2843 !test_bit(STRIPE_INSYNC
, &sh
->state
)) ||
2844 test_bit(STRIPE_OP_CHECK
, &sh
->ops
.pending
) ||
2845 test_bit(STRIPE_OP_MOD_REPAIR_PD
, &sh
->ops
.pending
))
2846 handle_parity_checks5(conf
, sh
, &s
, disks
);
2848 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
2849 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
2850 clear_bit(STRIPE_SYNCING
, &sh
->state
);
2853 /* If the failed drive is just a ReadError, then we might need to progress
2854 * the repair/check process
2856 if (s
.failed
== 1 && !conf
->mddev
->ro
&&
2857 test_bit(R5_ReadError
, &sh
->dev
[s
.failed_num
].flags
)
2858 && !test_bit(R5_LOCKED
, &sh
->dev
[s
.failed_num
].flags
)
2859 && test_bit(R5_UPTODATE
, &sh
->dev
[s
.failed_num
].flags
)
2861 dev
= &sh
->dev
[s
.failed_num
];
2862 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
2863 set_bit(R5_Wantwrite
, &dev
->flags
);
2864 if (!test_and_set_bit(STRIPE_OP_IO
, &sh
->ops
.pending
))
2866 set_bit(R5_ReWrite
, &dev
->flags
);
2867 set_bit(R5_LOCKED
, &dev
->flags
);
2870 /* let's read it back */
2871 set_bit(R5_Wantread
, &dev
->flags
);
2872 if (!test_and_set_bit(STRIPE_OP_IO
, &sh
->ops
.pending
))
2874 set_bit(R5_LOCKED
, &dev
->flags
);
2879 /* Finish postxor operations initiated by the expansion
2882 if (test_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.complete
) &&
2883 !test_bit(STRIPE_OP_BIODRAIN
, &sh
->ops
.pending
)) {
2885 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
2887 clear_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.pending
);
2888 clear_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.ack
);
2889 clear_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.complete
);
2891 for (i
= conf
->raid_disks
; i
--; ) {
2892 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
2893 if (!test_and_set_bit(STRIPE_OP_IO
, &sh
->ops
.pending
))
2898 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
2899 !test_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.pending
)) {
2900 /* Need to write out all blocks after computing parity */
2901 sh
->disks
= conf
->raid_disks
;
2902 sh
->pd_idx
= stripe_to_pdidx(sh
->sector
, conf
,
2904 s
.locked
+= handle_write_operations5(sh
, 1, 1);
2905 } else if (s
.expanded
&&
2906 !test_bit(STRIPE_OP_POSTXOR
, &sh
->ops
.pending
)) {
2907 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
2908 atomic_dec(&conf
->reshape_stripes
);
2909 wake_up(&conf
->wait_for_overlap
);
2910 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
2913 if (s
.expanding
&& s
.locked
== 0 &&
2914 !test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
))
2915 handle_stripe_expansion(conf
, sh
, NULL
);
2918 pending
= get_stripe_work(sh
);
2921 spin_unlock(&sh
->lock
);
2923 /* wait for this device to become unblocked */
2924 if (unlikely(blocked_rdev
))
2925 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
2928 raid5_run_ops(sh
, pending
);
2930 return_io(return_bi
);
2934 static void handle_stripe6(struct stripe_head
*sh
, struct page
*tmp_page
)
2936 raid6_conf_t
*conf
= sh
->raid_conf
;
2937 int disks
= sh
->disks
;
2938 struct bio
*return_bi
= NULL
;
2939 int i
, pd_idx
= sh
->pd_idx
;
2940 struct stripe_head_state s
;
2941 struct r6_state r6s
;
2942 struct r5dev
*dev
, *pdev
, *qdev
;
2943 mdk_rdev_t
*blocked_rdev
= NULL
;
2945 r6s
.qd_idx
= raid6_next_disk(pd_idx
, disks
);
2946 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
2947 "pd_idx=%d, qd_idx=%d\n",
2948 (unsigned long long)sh
->sector
, sh
->state
,
2949 atomic_read(&sh
->count
), pd_idx
, r6s
.qd_idx
);
2950 memset(&s
, 0, sizeof(s
));
2952 spin_lock(&sh
->lock
);
2953 clear_bit(STRIPE_HANDLE
, &sh
->state
);
2954 clear_bit(STRIPE_DELAYED
, &sh
->state
);
2956 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
2957 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2958 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
2959 /* Now to look around and see what can be done */
2962 for (i
=disks
; i
--; ) {
2965 clear_bit(R5_Insync
, &dev
->flags
);
2967 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
2968 i
, dev
->flags
, dev
->toread
, dev
->towrite
, dev
->written
);
2969 /* maybe we can reply to a read */
2970 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
) {
2971 struct bio
*rbi
, *rbi2
;
2972 pr_debug("Return read for disc %d\n", i
);
2973 spin_lock_irq(&conf
->device_lock
);
2976 if (test_and_clear_bit(R5_Overlap
, &dev
->flags
))
2977 wake_up(&conf
->wait_for_overlap
);
2978 spin_unlock_irq(&conf
->device_lock
);
2979 while (rbi
&& rbi
->bi_sector
< dev
->sector
+ STRIPE_SECTORS
) {
2980 copy_data(0, rbi
, dev
->page
, dev
->sector
);
2981 rbi2
= r5_next_bio(rbi
, dev
->sector
);
2982 spin_lock_irq(&conf
->device_lock
);
2983 if (--rbi
->bi_phys_segments
== 0) {
2984 rbi
->bi_next
= return_bi
;
2987 spin_unlock_irq(&conf
->device_lock
);
2992 /* now count some things */
2993 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
2994 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
3001 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
3006 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3007 if (rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3008 blocked_rdev
= rdev
;
3009 atomic_inc(&rdev
->nr_pending
);
3012 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3013 /* The ReadError flag will just be confusing now */
3014 clear_bit(R5_ReadError
, &dev
->flags
);
3015 clear_bit(R5_ReWrite
, &dev
->flags
);
3017 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3018 || test_bit(R5_ReadError
, &dev
->flags
)) {
3020 r6s
.failed_num
[s
.failed
] = i
;
3023 set_bit(R5_Insync
, &dev
->flags
);
3027 if (unlikely(blocked_rdev
)) {
3028 set_bit(STRIPE_HANDLE
, &sh
->state
);
3031 pr_debug("locked=%d uptodate=%d to_read=%d"
3032 " to_write=%d failed=%d failed_num=%d,%d\n",
3033 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
, s
.failed
,
3034 r6s
.failed_num
[0], r6s
.failed_num
[1]);
3035 /* check if the array has lost >2 devices and, if so, some requests
3036 * might need to be failed
3038 if (s
.failed
> 2 && s
.to_read
+s
.to_write
+s
.written
)
3039 handle_requests_to_failed_array(conf
, sh
, &s
, disks
,
3041 if (s
.failed
> 2 && s
.syncing
) {
3042 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3043 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3048 * might be able to return some write requests if the parity blocks
3049 * are safe, or on a failed drive
3051 pdev
= &sh
->dev
[pd_idx
];
3052 r6s
.p_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == pd_idx
)
3053 || (s
.failed
>= 2 && r6s
.failed_num
[1] == pd_idx
);
3054 qdev
= &sh
->dev
[r6s
.qd_idx
];
3055 r6s
.q_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == r6s
.qd_idx
)
3056 || (s
.failed
>= 2 && r6s
.failed_num
[1] == r6s
.qd_idx
);
3059 ( r6s
.p_failed
|| ((test_bit(R5_Insync
, &pdev
->flags
)
3060 && !test_bit(R5_LOCKED
, &pdev
->flags
)
3061 && test_bit(R5_UPTODATE
, &pdev
->flags
)))) &&
3062 ( r6s
.q_failed
|| ((test_bit(R5_Insync
, &qdev
->flags
)
3063 && !test_bit(R5_LOCKED
, &qdev
->flags
)
3064 && test_bit(R5_UPTODATE
, &qdev
->flags
)))))
3065 handle_completed_write_requests(conf
, sh
, disks
, &return_bi
);
3067 /* Now we might consider reading some blocks, either to check/generate
3068 * parity, or to satisfy requests
3069 * or to load a block that is being partially written.
3071 if (s
.to_read
|| s
.non_overwrite
|| (s
.to_write
&& s
.failed
) ||
3072 (s
.syncing
&& (s
.uptodate
< disks
)) || s
.expanding
)
3073 handle_issuing_new_read_requests6(sh
, &s
, &r6s
, disks
);
3075 /* now to consider writing and what else, if anything should be read */
3077 handle_issuing_new_write_requests6(conf
, sh
, &s
, &r6s
, disks
);
3079 /* maybe we need to check and possibly fix the parity for this stripe
3080 * Any reads will already have been scheduled, so we just see if enough
3083 if (s
.syncing
&& s
.locked
== 0 && !test_bit(STRIPE_INSYNC
, &sh
->state
))
3084 handle_parity_checks6(conf
, sh
, &s
, &r6s
, tmp_page
, disks
);
3086 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3087 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3088 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3091 /* If the failed drives are just a ReadError, then we might need
3092 * to progress the repair/check process
3094 if (s
.failed
<= 2 && !conf
->mddev
->ro
)
3095 for (i
= 0; i
< s
.failed
; i
++) {
3096 dev
= &sh
->dev
[r6s
.failed_num
[i
]];
3097 if (test_bit(R5_ReadError
, &dev
->flags
)
3098 && !test_bit(R5_LOCKED
, &dev
->flags
)
3099 && test_bit(R5_UPTODATE
, &dev
->flags
)
3101 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3102 set_bit(R5_Wantwrite
, &dev
->flags
);
3103 set_bit(R5_ReWrite
, &dev
->flags
);
3104 set_bit(R5_LOCKED
, &dev
->flags
);
3106 /* let's read it back */
3107 set_bit(R5_Wantread
, &dev
->flags
);
3108 set_bit(R5_LOCKED
, &dev
->flags
);
3113 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
)) {
3114 /* Need to write out all blocks after computing P&Q */
3115 sh
->disks
= conf
->raid_disks
;
3116 sh
->pd_idx
= stripe_to_pdidx(sh
->sector
, conf
,
3118 compute_parity6(sh
, RECONSTRUCT_WRITE
);
3119 for (i
= conf
->raid_disks
; i
-- ; ) {
3120 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3122 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3124 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3125 } else if (s
.expanded
) {
3126 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3127 atomic_dec(&conf
->reshape_stripes
);
3128 wake_up(&conf
->wait_for_overlap
);
3129 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3132 if (s
.expanding
&& s
.locked
== 0 &&
3133 !test_bit(STRIPE_OP_COMPUTE_BLK
, &sh
->ops
.pending
))
3134 handle_stripe_expansion(conf
, sh
, &r6s
);
3137 spin_unlock(&sh
->lock
);
3139 /* wait for this device to become unblocked */
3140 if (unlikely(blocked_rdev
))
3141 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3143 return_io(return_bi
);
3145 for (i
=disks
; i
-- ;) {
3149 if (test_and_clear_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
))
3151 else if (test_and_clear_bit(R5_Wantread
, &sh
->dev
[i
].flags
))
3156 set_bit(STRIPE_IO_STARTED
, &sh
->state
);
3158 bi
= &sh
->dev
[i
].req
;
3162 bi
->bi_end_io
= raid5_end_write_request
;
3164 bi
->bi_end_io
= raid5_end_read_request
;
3167 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3168 if (rdev
&& test_bit(Faulty
, &rdev
->flags
))
3171 atomic_inc(&rdev
->nr_pending
);
3175 if (s
.syncing
|| s
.expanding
|| s
.expanded
)
3176 md_sync_acct(rdev
->bdev
, STRIPE_SECTORS
);
3178 bi
->bi_bdev
= rdev
->bdev
;
3179 pr_debug("for %llu schedule op %ld on disc %d\n",
3180 (unsigned long long)sh
->sector
, bi
->bi_rw
, i
);
3181 atomic_inc(&sh
->count
);
3182 bi
->bi_sector
= sh
->sector
+ rdev
->data_offset
;
3183 bi
->bi_flags
= 1 << BIO_UPTODATE
;
3185 bi
->bi_max_vecs
= 1;
3187 bi
->bi_io_vec
= &sh
->dev
[i
].vec
;
3188 bi
->bi_io_vec
[0].bv_len
= STRIPE_SIZE
;
3189 bi
->bi_io_vec
[0].bv_offset
= 0;
3190 bi
->bi_size
= STRIPE_SIZE
;
3193 test_bit(R5_ReWrite
, &sh
->dev
[i
].flags
))
3194 atomic_add(STRIPE_SECTORS
, &rdev
->corrected_errors
);
3195 generic_make_request(bi
);
3198 set_bit(STRIPE_DEGRADED
, &sh
->state
);
3199 pr_debug("skip op %ld on disc %d for sector %llu\n",
3200 bi
->bi_rw
, i
, (unsigned long long)sh
->sector
);
3201 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3202 set_bit(STRIPE_HANDLE
, &sh
->state
);
static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
{
	if (sh->raid_conf->level == 6)
		handle_stripe6(sh, tmp_page);
	else
		handle_stripe5(sh);
}
3217 static void raid5_activate_delayed(raid5_conf_t
*conf
)
3219 if (atomic_read(&conf
->preread_active_stripes
) < IO_THRESHOLD
) {
3220 while (!list_empty(&conf
->delayed_list
)) {
3221 struct list_head
*l
= conf
->delayed_list
.next
;
3222 struct stripe_head
*sh
;
3223 sh
= list_entry(l
, struct stripe_head
, lru
);
3225 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3226 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3227 atomic_inc(&conf
->preread_active_stripes
);
3228 list_add_tail(&sh
->lru
, &conf
->hold_list
);
3231 blk_plug_device(conf
->mddev
->queue
);
3234 static void activate_bit_delay(raid5_conf_t
*conf
)
3236 /* device_lock is held */
3237 struct list_head head
;
3238 list_add(&head
, &conf
->bitmap_list
);
3239 list_del_init(&conf
->bitmap_list
);
3240 while (!list_empty(&head
)) {
3241 struct stripe_head
*sh
= list_entry(head
.next
, struct stripe_head
, lru
);
3242 list_del_init(&sh
->lru
);
3243 atomic_inc(&sh
->count
);
3244 __release_stripe(conf
, sh
);
3248 static void unplug_slaves(mddev_t
*mddev
)
3250 raid5_conf_t
*conf
= mddev_to_conf(mddev
);
3254 for (i
=0; i
<mddev
->raid_disks
; i
++) {
3255 mdk_rdev_t
*rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3256 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
) && atomic_read(&rdev
->nr_pending
)) {
3257 struct request_queue
*r_queue
= bdev_get_queue(rdev
->bdev
);
3259 atomic_inc(&rdev
->nr_pending
);
3262 blk_unplug(r_queue
);
3264 rdev_dec_pending(rdev
, mddev
);
3271 static void raid5_unplug_device(struct request_queue
*q
)
3273 mddev_t
*mddev
= q
->queuedata
;
3274 raid5_conf_t
*conf
= mddev_to_conf(mddev
);
3275 unsigned long flags
;
3277 spin_lock_irqsave(&conf
->device_lock
, flags
);
3279 if (blk_remove_plug(q
)) {
3281 raid5_activate_delayed(conf
);
3283 md_wakeup_thread(mddev
->thread
);
3285 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
3287 unplug_slaves(mddev
);
3290 static int raid5_congested(void *data
, int bits
)
3292 mddev_t
*mddev
= data
;
3293 raid5_conf_t
*conf
= mddev_to_conf(mddev
);
3295 /* No difference between reads and writes. Just check
3296 * how busy the stripe_cache is
3298 if (conf
->inactive_blocked
)
3302 if (list_empty_careful(&conf
->inactive_list
))
3308 /* We want read requests to align with chunks where possible,
3309 * but write requests don't need to.
3311 static int raid5_mergeable_bvec(struct request_queue
*q
, struct bio
*bio
, struct bio_vec
*biovec
)
3313 mddev_t
*mddev
= q
->queuedata
;
3314 sector_t sector
= bio
->bi_sector
+ get_start_sect(bio
->bi_bdev
);
3316 unsigned int chunk_sectors
= mddev
->chunk_size
>> 9;
3317 unsigned int bio_sectors
= bio
->bi_size
>> 9;
3319 if (bio_data_dir(bio
) == WRITE
)
3320 return biovec
->bv_len
; /* always allow writes to be mergeable */
3322 max
= (chunk_sectors
- ((sector
& (chunk_sectors
- 1)) + bio_sectors
)) << 9;
3323 if (max
< 0) max
= 0;
3324 if (max
<= biovec
->bv_len
&& bio_sectors
== 0)
3325 return biovec
->bv_len
;
3331 static int in_chunk_boundary(mddev_t
*mddev
, struct bio
*bio
)
3333 sector_t sector
= bio
->bi_sector
+ get_start_sect(bio
->bi_bdev
);
3334 unsigned int chunk_sectors
= mddev
->chunk_size
>> 9;
3335 unsigned int bio_sectors
= bio
->bi_size
>> 9;
3337 return chunk_sectors
>=
3338 ((sector
& (chunk_sectors
- 1)) + bio_sectors
);
3342 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3343 * later sampled by raid5d.
3345 static void add_bio_to_retry(struct bio
*bi
,raid5_conf_t
*conf
)
3347 unsigned long flags
;
3349 spin_lock_irqsave(&conf
->device_lock
, flags
);
3351 bi
->bi_next
= conf
->retry_read_aligned_list
;
3352 conf
->retry_read_aligned_list
= bi
;
3354 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
3355 md_wakeup_thread(conf
->mddev
->thread
);
3359 static struct bio
*remove_bio_from_retry(raid5_conf_t
*conf
)
3363 bi
= conf
->retry_read_aligned
;
3365 conf
->retry_read_aligned
= NULL
;
3368 bi
= conf
->retry_read_aligned_list
;
3370 conf
->retry_read_aligned_list
= bi
->bi_next
;
3372 bi
->bi_phys_segments
= 1; /* biased count of active stripes */
3373 bi
->bi_hw_segments
= 0; /* count of processed stripes */
3381 * The "raid5_align_endio" should check if the read succeeded and if it
3382 * did, call bio_endio on the original bio (having bio_put the new bio
3384 * If the read failed..
3386 static void raid5_align_endio(struct bio
*bi
, int error
)
3388 struct bio
* raid_bi
= bi
->bi_private
;
3391 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
3396 mddev
= raid_bi
->bi_bdev
->bd_disk
->queue
->queuedata
;
3397 conf
= mddev_to_conf(mddev
);
3398 rdev
= (void*)raid_bi
->bi_next
;
3399 raid_bi
->bi_next
= NULL
;
3401 rdev_dec_pending(rdev
, conf
->mddev
);
3403 if (!error
&& uptodate
) {
3404 bio_endio(raid_bi
, 0);
3405 if (atomic_dec_and_test(&conf
->active_aligned_reads
))
3406 wake_up(&conf
->wait_for_stripe
);
3411 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3413 add_bio_to_retry(raid_bi
, conf
);
3416 static int bio_fits_rdev(struct bio
*bi
)
3418 struct request_queue
*q
= bdev_get_queue(bi
->bi_bdev
);
3420 if ((bi
->bi_size
>>9) > q
->max_sectors
)
3422 blk_recount_segments(q
, bi
);
3423 if (bi
->bi_phys_segments
> q
->max_phys_segments
||
3424 bi
->bi_hw_segments
> q
->max_hw_segments
)
3427 if (q
->merge_bvec_fn
)
3428 /* it's too hard to apply the merge_bvec_fn at this stage,
3437 static int chunk_aligned_read(struct request_queue
*q
, struct bio
* raid_bio
)
3439 mddev_t
*mddev
= q
->queuedata
;
3440 raid5_conf_t
*conf
= mddev_to_conf(mddev
);
3441 const unsigned int raid_disks
= conf
->raid_disks
;
3442 const unsigned int data_disks
= raid_disks
- conf
->max_degraded
;
3443 unsigned int dd_idx
, pd_idx
;
3444 struct bio
* align_bi
;
3447 if (!in_chunk_boundary(mddev
, raid_bio
)) {
3448 pr_debug("chunk_aligned_read : non aligned\n");
3452 * use bio_clone to make a copy of the bio
3454 align_bi
= bio_clone(raid_bio
, GFP_NOIO
);
3458 * set bi_end_io to a new function, and set bi_private to the
3461 align_bi
->bi_end_io
= raid5_align_endio
;
3462 align_bi
->bi_private
= raid_bio
;
3466 align_bi
->bi_sector
= raid5_compute_sector(raid_bio
->bi_sector
,
3474 rdev
= rcu_dereference(conf
->disks
[dd_idx
].rdev
);
3475 if (rdev
&& test_bit(In_sync
, &rdev
->flags
)) {
3476 atomic_inc(&rdev
->nr_pending
);
3478 raid_bio
->bi_next
= (void*)rdev
;
3479 align_bi
->bi_bdev
= rdev
->bdev
;
3480 align_bi
->bi_flags
&= ~(1 << BIO_SEG_VALID
);
3481 align_bi
->bi_sector
+= rdev
->data_offset
;
3483 if (!bio_fits_rdev(align_bi
)) {
3484 /* too big in some way */
3486 rdev_dec_pending(rdev
, mddev
);
3490 spin_lock_irq(&conf
->device_lock
);
3491 wait_event_lock_irq(conf
->wait_for_stripe
,
3493 conf
->device_lock
, /* nothing */);
3494 atomic_inc(&conf
->active_aligned_reads
);
3495 spin_unlock_irq(&conf
->device_lock
);
3497 generic_make_request(align_bi
);
3506 /* __get_priority_stripe - get the next stripe to process
3508 * Full stripe writes are allowed to pass preread active stripes up until
3509 * the bypass_threshold is exceeded. In general the bypass_count
3510 * increments when the handle_list is handled before the hold_list; however, it
3511 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
3512 * stripe with in flight i/o. The bypass_count will be reset when the
3513 * head of the hold_list has changed, i.e. the head was promoted to the
3516 static struct stripe_head
*__get_priority_stripe(raid5_conf_t
*conf
)
3518 struct stripe_head
*sh
;
3520 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3522 list_empty(&conf
->handle_list
) ? "empty" : "busy",
3523 list_empty(&conf
->hold_list
) ? "empty" : "busy",
3524 atomic_read(&conf
->pending_full_writes
), conf
->bypass_count
);
3526 if (!list_empty(&conf
->handle_list
)) {
3527 sh
= list_entry(conf
->handle_list
.next
, typeof(*sh
), lru
);
3529 if (list_empty(&conf
->hold_list
))
3530 conf
->bypass_count
= 0;
3531 else if (!test_bit(STRIPE_IO_STARTED
, &sh
->state
)) {
3532 if (conf
->hold_list
.next
== conf
->last_hold
)
3533 conf
->bypass_count
++;
3535 conf
->last_hold
= conf
->hold_list
.next
;
3536 conf
->bypass_count
-= conf
->bypass_threshold
;
3537 if (conf
->bypass_count
< 0)
3538 conf
->bypass_count
= 0;
3541 } else if (!list_empty(&conf
->hold_list
) &&
3542 ((conf
->bypass_threshold
&&
3543 conf
->bypass_count
> conf
->bypass_threshold
) ||
3544 atomic_read(&conf
->pending_full_writes
) == 0)) {
3545 sh
= list_entry(conf
->hold_list
.next
,
3547 conf
->bypass_count
-= conf
->bypass_threshold
;
3548 if (conf
->bypass_count
< 0)
3549 conf
->bypass_count
= 0;
3553 list_del_init(&sh
->lru
);
3554 atomic_inc(&sh
->count
);
3555 BUG_ON(atomic_read(&sh
->count
) != 1);
3559 static int make_request(struct request_queue
*q
, struct bio
* bi
)
3561 mddev_t
*mddev
= q
->queuedata
;
3562 raid5_conf_t
*conf
= mddev_to_conf(mddev
);
3563 unsigned int dd_idx
, pd_idx
;
3564 sector_t new_sector
;
3565 sector_t logical_sector
, last_sector
;
3566 struct stripe_head
*sh
;
3567 const int rw
= bio_data_dir(bi
);
3570 if (unlikely(bio_barrier(bi
))) {
3571 bio_endio(bi
, -EOPNOTSUPP
);
3575 md_write_start(mddev
, bi
);
3577 disk_stat_inc(mddev
->gendisk
, ios
[rw
]);
3578 disk_stat_add(mddev
->gendisk
, sectors
[rw
], bio_sectors(bi
));
3581 mddev
->reshape_position
== MaxSector
&&
3582 chunk_aligned_read(q
,bi
))
3585 logical_sector
= bi
->bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
3586 last_sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
3588 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
3590 for (;logical_sector
< last_sector
; logical_sector
+= STRIPE_SECTORS
) {
3592 int disks
, data_disks
;
3595 prepare_to_wait(&conf
->wait_for_overlap
, &w
, TASK_UNINTERRUPTIBLE
);
3596 if (likely(conf
->expand_progress
== MaxSector
))
3597 disks
= conf
->raid_disks
;
3599 /* spinlock is needed as expand_progress may be
3600 * 64bit on a 32bit platform, and so it might be
3601 * possible to see a half-updated value
3602 * Ofcourse expand_progress could change after
3603 * the lock is dropped, so once we get a reference
3604 * to the stripe that we think it is, we will have
3607 spin_lock_irq(&conf
->device_lock
);
3608 disks
= conf
->raid_disks
;
3609 if (logical_sector
>= conf
->expand_progress
)
3610 disks
= conf
->previous_raid_disks
;
3612 if (logical_sector
>= conf
->expand_lo
) {
3613 spin_unlock_irq(&conf
->device_lock
);
3618 spin_unlock_irq(&conf
->device_lock
);
3620 data_disks
= disks
- conf
->max_degraded
;
3622 new_sector
= raid5_compute_sector(logical_sector
, disks
, data_disks
,
3623 &dd_idx
, &pd_idx
, conf
);
3624 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3625 (unsigned long long)new_sector
,
3626 (unsigned long long)logical_sector
);
3628 sh
= get_active_stripe(conf
, new_sector
, disks
, pd_idx
, (bi
->bi_rw
&RWA_MASK
));
3630 if (unlikely(conf
->expand_progress
!= MaxSector
)) {
3631 /* expansion might have moved on while waiting for a
3632 * stripe, so we must do the range check again.
3633 * Expansion could still move past after this
3634 * test, but as we are holding a reference to
3635 * 'sh', we know that if that happens,
3636 * STRIPE_EXPANDING will get set and the expansion
3637 * won't proceed until we finish with the stripe.
3640 spin_lock_irq(&conf
->device_lock
);
3641 if (logical_sector
< conf
->expand_progress
&&
3642 disks
== conf
->previous_raid_disks
)
3643 /* mismatch, need to try again */
3645 spin_unlock_irq(&conf
->device_lock
);
3651 /* FIXME what if we get a false positive because these
3652 * are being updated.
3654 if (logical_sector
>= mddev
->suspend_lo
&&
3655 logical_sector
< mddev
->suspend_hi
) {
3661 if (test_bit(STRIPE_EXPANDING
, &sh
->state
) ||
3662 !add_stripe_bio(sh
, bi
, dd_idx
, (bi
->bi_rw
&RW_MASK
))) {
3663 /* Stripe is busy expanding or
3664 * add failed due to overlap. Flush everything
3667 raid5_unplug_device(mddev
->queue
);
3672 finish_wait(&conf
->wait_for_overlap
, &w
);
3673 set_bit(STRIPE_HANDLE
, &sh
->state
);
3674 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3677 /* cannot get stripe for read-ahead, just give-up */
3678 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
3679 finish_wait(&conf
->wait_for_overlap
, &w
);
3684 spin_lock_irq(&conf
->device_lock
);
3685 remaining
= --bi
->bi_phys_segments
;
3686 spin_unlock_irq(&conf
->device_lock
);
3687 if (remaining
== 0) {
3690 md_write_end(mddev
);
3693 test_bit(BIO_UPTODATE
, &bi
->bi_flags
)
3699 static sector_t
reshape_request(mddev_t
*mddev
, sector_t sector_nr
, int *skipped
)
3701 /* reshaping is quite different to recovery/resync so it is
3702 * handled quite separately ... here.
3704 * On each call to sync_request, we gather one chunk worth of
3705 * destination stripes and flag them as expanding.
3706 * Then we find all the source stripes and request reads.
3707 * As the reads complete, handle_stripe will copy the data
3708 * into the destination stripe and release that stripe.
3710 raid5_conf_t
*conf
= (raid5_conf_t
*) mddev
->private;
3711 struct stripe_head
*sh
;
3713 sector_t first_sector
, last_sector
;
3714 int raid_disks
= conf
->previous_raid_disks
;
3715 int data_disks
= raid_disks
- conf
->max_degraded
;
3716 int new_data_disks
= conf
->raid_disks
- conf
->max_degraded
;
3719 sector_t writepos
, safepos
, gap
;
3721 if (sector_nr
== 0 &&
3722 conf
->expand_progress
!= 0) {
3723 /* restarting in the middle, skip the initial sectors */
3724 sector_nr
= conf
->expand_progress
;
3725 sector_div(sector_nr
, new_data_disks
);
3730 /* we update the metadata when there is more than 3Meg
3731 * in the block range (that is rather arbitrary, should
3732 * probably be time based) or when the data about to be
3733 * copied would over-write the source of the data at
3734 * the front of the range.
3735 * i.e. one new_stripe forward from expand_progress new_maps
3736 * to after where expand_lo old_maps to
3738 writepos
= conf
->expand_progress
+
3739 conf
->chunk_size
/512*(new_data_disks
);
3740 sector_div(writepos
, new_data_disks
);
3741 safepos
= conf
->expand_lo
;
3742 sector_div(safepos
, data_disks
);
3743 gap
= conf
->expand_progress
- conf
->expand_lo
;
3745 if (writepos
>= safepos
||
3746 gap
> (new_data_disks
)*3000*2 /*3Meg*/) {
3747 /* Cannot proceed until we've updated the superblock... */
3748 wait_event(conf
->wait_for_overlap
,
3749 atomic_read(&conf
->reshape_stripes
)==0);
3750 mddev
->reshape_position
= conf
->expand_progress
;
3751 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
3752 md_wakeup_thread(mddev
->thread
);
3753 wait_event(mddev
->sb_wait
, mddev
->flags
== 0 ||
3754 kthread_should_stop());
3755 spin_lock_irq(&conf
->device_lock
);
3756 conf
->expand_lo
= mddev
->reshape_position
;
3757 spin_unlock_irq(&conf
->device_lock
);
3758 wake_up(&conf
->wait_for_overlap
);
3761 for (i
=0; i
< conf
->chunk_size
/512; i
+= STRIPE_SECTORS
) {
3764 pd_idx
= stripe_to_pdidx(sector_nr
+i
, conf
, conf
->raid_disks
);
3765 sh
= get_active_stripe(conf
, sector_nr
+i
,
3766 conf
->raid_disks
, pd_idx
, 0);
3767 set_bit(STRIPE_EXPANDING
, &sh
->state
);
3768 atomic_inc(&conf
->reshape_stripes
);
3769 /* If any of this stripe is beyond the end of the old
3770 * array, then we need to zero those blocks
3772 for (j
=sh
->disks
; j
--;) {
3774 if (j
== sh
->pd_idx
)
3776 if (conf
->level
== 6 &&
3777 j
== raid6_next_disk(sh
->pd_idx
, sh
->disks
))
3779 s
= compute_blocknr(sh
, j
);
3780 if (s
< (mddev
->array_size
<<1)) {
3784 memset(page_address(sh
->dev
[j
].page
), 0, STRIPE_SIZE
);
3785 set_bit(R5_Expanded
, &sh
->dev
[j
].flags
);
3786 set_bit(R5_UPTODATE
, &sh
->dev
[j
].flags
);
3789 set_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3790 set_bit(STRIPE_HANDLE
, &sh
->state
);
3794 spin_lock_irq(&conf
->device_lock
);
3795 conf
->expand_progress
= (sector_nr
+ i
) * new_data_disks
;
3796 spin_unlock_irq(&conf
->device_lock
);
3797 /* Ok, those stripe are ready. We can start scheduling
3798 * reads on the source stripes.
3799 * The source stripes are determined by mapping the first and last
3800 * block on the destination stripes.
3803 raid5_compute_sector(sector_nr
*(new_data_disks
),
3804 raid_disks
, data_disks
,
3805 &dd_idx
, &pd_idx
, conf
);
3807 raid5_compute_sector((sector_nr
+conf
->chunk_size
/512)
3808 *(new_data_disks
) -1,
3809 raid_disks
, data_disks
,
3810 &dd_idx
, &pd_idx
, conf
);
3811 if (last_sector
>= (mddev
->size
<<1))
3812 last_sector
= (mddev
->size
<<1)-1;
3813 while (first_sector
<= last_sector
) {
3814 pd_idx
= stripe_to_pdidx(first_sector
, conf
,
3815 conf
->previous_raid_disks
);
3816 sh
= get_active_stripe(conf
, first_sector
,
3817 conf
->previous_raid_disks
, pd_idx
, 0);
3818 set_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3819 set_bit(STRIPE_HANDLE
, &sh
->state
);
3821 first_sector
+= STRIPE_SECTORS
;
3823 /* If this takes us to the resync_max point where we have to pause,
3824 * then we need to write out the superblock.
3826 sector_nr
+= conf
->chunk_size
>>9;
3827 if (sector_nr
>= mddev
->resync_max
) {
3828 /* Cannot proceed until we've updated the superblock... */
3829 wait_event(conf
->wait_for_overlap
,
3830 atomic_read(&conf
->reshape_stripes
) == 0);
3831 mddev
->reshape_position
= conf
->expand_progress
;
3832 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
3833 md_wakeup_thread(mddev
->thread
);
3834 wait_event(mddev
->sb_wait
,
3835 !test_bit(MD_CHANGE_DEVS
, &mddev
->flags
)
3836 || kthread_should_stop());
3837 spin_lock_irq(&conf
->device_lock
);
3838 conf
->expand_lo
= mddev
->reshape_position
;
3839 spin_unlock_irq(&conf
->device_lock
);
3840 wake_up(&conf
->wait_for_overlap
);
3842 return conf
->chunk_size
>>9;
3845 /* FIXME go_faster isn't used */
3846 static inline sector_t
sync_request(mddev_t
*mddev
, sector_t sector_nr
, int *skipped
, int go_faster
)
3848 raid5_conf_t
*conf
= (raid5_conf_t
*) mddev
->private;
3849 struct stripe_head
*sh
;
3851 int raid_disks
= conf
->raid_disks
;
3852 sector_t max_sector
= mddev
->size
<< 1;
3854 int still_degraded
= 0;
3857 if (sector_nr
>= max_sector
) {
3858 /* just being told to finish up .. nothing much to do */
3859 unplug_slaves(mddev
);
3860 if (test_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
)) {
3865 if (mddev
->curr_resync
< max_sector
) /* aborted */
3866 bitmap_end_sync(mddev
->bitmap
, mddev
->curr_resync
,
3868 else /* completed sync */
3870 bitmap_close_sync(mddev
->bitmap
);
3875 if (test_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
))
3876 return reshape_request(mddev
, sector_nr
, skipped
);
3878 /* No need to check resync_max as we never do more than one
3879 * stripe, and as resync_max will always be on a chunk boundary,
3880 * if the check in md_do_sync didn't fire, there is no chance
3881 * of overstepping resync_max here
3884 /* if there is too many failed drives and we are trying
3885 * to resync, then assert that we are finished, because there is
3886 * nothing we can do.
3888 if (mddev
->degraded
>= conf
->max_degraded
&&
3889 test_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
)) {
3890 sector_t rv
= (mddev
->size
<< 1) - sector_nr
;
3894 if (!bitmap_start_sync(mddev
->bitmap
, sector_nr
, &sync_blocks
, 1) &&
3895 !test_bit(MD_RECOVERY_REQUESTED
, &mddev
->recovery
) &&
3896 !conf
->fullsync
&& sync_blocks
>= STRIPE_SECTORS
) {
3897 /* we can skip this block, and probably more */
3898 sync_blocks
/= STRIPE_SECTORS
;
3900 return sync_blocks
* STRIPE_SECTORS
; /* keep things rounded to whole stripes */
3904 bitmap_cond_end_sync(mddev
->bitmap
, sector_nr
);
3906 pd_idx
= stripe_to_pdidx(sector_nr
, conf
, raid_disks
);
3907 sh
= get_active_stripe(conf
, sector_nr
, raid_disks
, pd_idx
, 1);
3909 sh
= get_active_stripe(conf
, sector_nr
, raid_disks
, pd_idx
, 0);
3910 /* make sure we don't swamp the stripe cache if someone else
3911 * is trying to get access
3913 schedule_timeout_uninterruptible(1);
3915 /* Need to check if array will still be degraded after recovery/resync
3916 * We don't need to check the 'failed' flag as when that gets set,
3919 for (i
=0; i
<mddev
->raid_disks
; i
++)
3920 if (conf
->disks
[i
].rdev
== NULL
)
3923 bitmap_start_sync(mddev
->bitmap
, sector_nr
, &sync_blocks
, still_degraded
);
3925 spin_lock(&sh
->lock
);
3926 set_bit(STRIPE_SYNCING
, &sh
->state
);
3927 clear_bit(STRIPE_INSYNC
, &sh
->state
);
3928 spin_unlock(&sh
->lock
);
3930 handle_stripe(sh
, NULL
);
3933 return STRIPE_SECTORS
;
3936 static int retry_aligned_read(raid5_conf_t
*conf
, struct bio
*raid_bio
)
3938 /* We may not be able to submit a whole bio at once as there
3939 * may not be enough stripe_heads available.
3940 * We cannot pre-allocate enough stripe_heads as we may need
3941 * more than exist in the cache (if we allow ever large chunks).
3942 * So we do one stripe head at a time and record in
3943 * ->bi_hw_segments how many have been done.
3945 * We *know* that this entire raid_bio is in one chunk, so
3946 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
3948 struct stripe_head
*sh
;
3950 sector_t sector
, logical_sector
, last_sector
;
3955 logical_sector
= raid_bio
->bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
3956 sector
= raid5_compute_sector( logical_sector
,
3958 conf
->raid_disks
- conf
->max_degraded
,
3962 last_sector
= raid_bio
->bi_sector
+ (raid_bio
->bi_size
>>9);
3964 for (; logical_sector
< last_sector
;
3965 logical_sector
+= STRIPE_SECTORS
,
3966 sector
+= STRIPE_SECTORS
,
3969 if (scnt
< raid_bio
->bi_hw_segments
)
3970 /* already done this stripe */
3973 sh
= get_active_stripe(conf
, sector
, conf
->raid_disks
, pd_idx
, 1);
3976 /* failed to get a stripe - must wait */
3977 raid_bio
->bi_hw_segments
= scnt
;
3978 conf
->retry_read_aligned
= raid_bio
;
3982 set_bit(R5_ReadError
, &sh
->dev
[dd_idx
].flags
);
3983 if (!add_stripe_bio(sh
, raid_bio
, dd_idx
, 0)) {
3985 raid_bio
->bi_hw_segments
= scnt
;
3986 conf
->retry_read_aligned
= raid_bio
;
3990 handle_stripe(sh
, NULL
);
3994 spin_lock_irq(&conf
->device_lock
);
3995 remaining
= --raid_bio
->bi_phys_segments
;
3996 spin_unlock_irq(&conf
->device_lock
);
3997 if (remaining
== 0) {
3999 raid_bio
->bi_end_io(raid_bio
,
4000 test_bit(BIO_UPTODATE
, &raid_bio
->bi_flags
)
4003 if (atomic_dec_and_test(&conf
->active_aligned_reads
))
4004 wake_up(&conf
->wait_for_stripe
);
4011 * This is our raid5 kernel thread.
4013 * We scan the hash table for stripes which can be handled now.
4014 * During the scan, completed stripes are saved for us by the interrupt
4015 * handler, so that they will not have to wait for our next wakeup.
4017 static void raid5d(mddev_t
*mddev
)
4019 struct stripe_head
*sh
;
4020 raid5_conf_t
*conf
= mddev_to_conf(mddev
);
4023 pr_debug("+++ raid5d active\n");
4025 md_check_recovery(mddev
);
4028 spin_lock_irq(&conf
->device_lock
);
4032 if (conf
->seq_flush
!= conf
->seq_write
) {
4033 int seq
= conf
->seq_flush
;
4034 spin_unlock_irq(&conf
->device_lock
);
4035 bitmap_unplug(mddev
->bitmap
);
4036 spin_lock_irq(&conf
->device_lock
);
4037 conf
->seq_write
= seq
;
4038 activate_bit_delay(conf
);
4041 while ((bio
= remove_bio_from_retry(conf
))) {
4043 spin_unlock_irq(&conf
->device_lock
);
4044 ok
= retry_aligned_read(conf
, bio
);
4045 spin_lock_irq(&conf
->device_lock
);
4051 sh
= __get_priority_stripe(conf
);
4054 async_tx_issue_pending_all();
4057 spin_unlock_irq(&conf
->device_lock
);
4060 handle_stripe(sh
, conf
->spare_page
);
4063 spin_lock_irq(&conf
->device_lock
);
4065 pr_debug("%d stripes handled\n", handled
);
4067 spin_unlock_irq(&conf
->device_lock
);
4069 unplug_slaves(mddev
);
4071 pr_debug("--- raid5d inactive\n");
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	return sprintf(page, "%d\n", conf->max_nr_stripes);
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long new;

	if (len >= PAGE_SIZE)
		return -EINVAL;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	md_allow_write(mddev);
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
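
/*
 * Usage sketch (assuming the usual md sysfs layout): the entry above shows
 * up as /sys/block/mdX/md/stripe_cache_size.  Reading it reports
 * max_nr_stripes; writing a value between 17 and 32768 grows or shrinks the
 * cache one stripe_head at a time.  Each stripe_head pins roughly raid_disks
 * pages plus bookkeeping, so large values trade memory for fewer cache
 * stalls on big sequential writes.
 */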
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	return sprintf(page, "%d\n", conf->bypass_threshold);
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long new;

	if (len >= PAGE_SIZE)
		return -EINVAL;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);
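
/*
 * Note (hedged summary): preread_bypass_threshold roughly corresponds to the
 * number of times a stripe that still needs a pre-read may be bypassed by
 * stripes that do not, before it is serviced anyway; see
 * __get_priority_stripe() earlier in this file for the exact accounting.
 * The store routine above rejects values larger than max_nr_stripes, and the
 * default is BYPASS_THRESHOLD.
 */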
static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};
static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory;
	mdk_rdev_t *rdev;
	struct disk_info *disk;
	struct list_head *tmp;
	int working_disks = 0;

	if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
		printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}

	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Currently only the number of disks can change, it must
		 * increase, and we must be past the point where
		 * a stripe over-writes itself.
		 */
		sector_t here_new, here_old;
		int old_disks;
		int max_degraded = (mddev->level == 5 ? 1 : 2);

		if (mddev->new_level != mddev->level ||
		    mddev->new_layout != mddev->layout ||
		    mddev->new_chunk != mddev->chunk_size) {
			printk(KERN_ERR "raid5: %s: unsupported reshape "
			       "required - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		if (mddev->delta_disks <= 0) {
			printk(KERN_ERR "raid5: %s: unsupported reshape "
			       "(reduce disks) required - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 */
		here_new = mddev->reshape_position;
		if (sector_div(here_new, (mddev->chunk_size>>9)*
			       (mddev->raid_disks - max_degraded))) {
			printk(KERN_ERR "raid5: reshape_position not "
			       "on a stripe boundary\n");
			return -EINVAL;
		}
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, (mddev->chunk_size>>9)*
			   (old_disks - max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
		if (here_new >= here_old) {
			/* Reading from the same stripe as writing to - bad */
			printk(KERN_ERR "raid5: reshape_position too early for "
			       "auto-recovery - aborting.\n");
			return -EINVAL;
		}
		printk(KERN_INFO "raid5: reshape will continue\n");
		/* OK, we should be able to continue; */
	}
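
	/*
	 * Worked example of the check above (illustrative, made-up numbers):
	 * growing a RAID5 from 4 to 6 devices with a 64KiB chunk gives
	 * 128-sector chunks, 5 new data disks and 3 old ones.  A
	 * reshape_position of 2560 sectors is stripe 2560/640 = 4 in the new
	 * geometry and falls in stripe 2560/384 = 6 of the old one, so
	 * here_new < here_old and the reshape can safely continue.  At
	 * reshape_position 640 both divisions give 1, i.e. the next write
	 * would land on a stripe that still has to be read - too early.
	 */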
	mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
	if ((conf = mddev->private) == NULL)
		goto abort;
	if (mddev->reshape_position == MaxSector) {
		conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks;
	} else {
		conf->raid_disks = mddev->raid_disks;
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	}

	conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	if (mddev->level == 6) {
		conf->spare_page = alloc_page(GFP_KERNEL);
		if (!conf->spare_page)
			goto abort;
	}
	spin_lock_init(&conf->device_lock);
	mddev->queue->queue_lock = &conf->device_lock;
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;

	pr_debug("raid5: run(%s) called.\n", mdname(mddev));

	rdev_for_each(rdev, tmp, mddev) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= conf->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
			       " disk %d\n", bdevname(rdev->bdev, b),
			       raid_disk);
			working_disks++;
		} else
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	mddev->degraded = conf->raid_disks - working_disks;
	conf->mddev = mddev;
	conf->chunk_size = mddev->chunk_size;
	conf->level = mddev->level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->expand_progress = mddev->reshape_position;

	/* device size must be a multiple of chunk size */
	mddev->size &= ~(mddev->chunk_size/1024 - 1);
	mddev->resync_max_sectors = mddev->size << 1;

	if (conf->level == 6 && conf->raid_disks < 4) {
		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
		       mdname(mddev), conf->raid_disks);
		goto abort;
	}
	if (!conf->chunk_size || conf->chunk_size % 4) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
		       conf->chunk_size, mdname(mddev));
		goto abort;
	}
	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
		printk(KERN_ERR
		       "raid5: unsupported parity algorithm %d for %s\n",
		       conf->algorithm, mdname(mddev));
		goto abort;
	}
	if (mddev->degraded > conf->max_degraded) {
		printk(KERN_ERR "raid5: not enough operational devices for %s"
		       " (%d/%d failed)\n",
		       mdname(mddev), mddev->degraded, conf->raid_disks);
		goto abort;
	}

	if (mddev->degraded > 0 &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "raid5: starting dirty degraded array: %s "
			       "- data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "raid5: cannot start dirty degraded array for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}

	mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid5: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto abort;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "raid5: couldn't allocate %dkB for buffers\n", memory);
		shrink_stripes(conf);
		md_unregister_thread(mddev->thread);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
		       memory, mdname(mddev));
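
	/*
	 * Rough sizing example for the message above (assuming 4KiB pages and
	 * the default NR_STRIPES of 256): with 4 member devices the dominant
	 * term is 256 * 4 * PAGE_SIZE = 4096kB of stripe cache pages, plus a
	 * few hundred kB of stripe_head and bio bookkeeping - which is what
	 * 'memory' estimates.
	 */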
	if (mddev->degraded == 0)
		printk(KERN_INFO "raid5: raid level %d set %s active with %d out of %d"
		       " devices, algorithm %d\n", conf->level, mdname(mddev),
		       mddev->raid_disks - mddev->degraded, mddev->raid_disks,
		       conf->algorithm);
	else
		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
		       " out of %d devices, algorithm %d\n", conf->level,
		       mdname(mddev), mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, conf->algorithm);

	print_raid5_conf(conf);

	if (conf->expand_progress != MaxSector) {
		printk(KERN_INFO "raid5: ...ok start reshape thread\n");
		conf->expand_lo = conf->expand_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"%s_reshape");
	}

	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (data disks) * chunksize, where 'data disks' is the number of
	 * raid devices minus the parity devices.
	 */
	{
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			(mddev->chunk_size / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
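
	/*
	 * Example for the read-ahead sizing above (assuming 4KiB pages): with
	 * a 64KiB chunk and 4 data disks, one full stripe is 4 * 16 = 64
	 * pages, so ra_pages is raised to at least 128 pages (512KiB) - two
	 * whole stripes, as the comment above requires.
	 */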
	/* Ok, everything is just fine now */
	if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));

	mddev->queue->unplug_fn = raid5_unplug_device;
	mddev->queue->backing_dev_info.congested_data = mddev;
	mddev->queue->backing_dev_info.congested_fn = raid5_congested;

	mddev->array_size = mddev->size * (conf->previous_raid_disks -
					   conf->max_degraded);

	blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
	return 0;

abort:
	if (conf) {
		print_raid5_conf(conf);
		safe_put_page(conf->spare_page);
		kfree(conf->disks);
		kfree(conf->stripe_hashtbl);
		kfree(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
	return -EIO;
}
static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	shrink_stripes(conf);
	kfree(conf->stripe_hashtbl);
	mddev->queue->backing_dev_info.congested_fn = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
	kfree(conf->disks);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
#ifdef DEBUG
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
	int i;

	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
	seq_printf(seq, "sh %llu, count %d.\n",
		   (unsigned long long)sh->sector, atomic_read(&sh->count));
	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->disks; i++) {
		seq_printf(seq, "(cache%d: %p %ld) ",
			   i, sh->dev[i].page, sh->dev[i].flags);
	}
	seq_printf(seq, "\n");
}
static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
				continue;
			print_sh(seq, sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}
#endif
static void status(struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_size >> 10, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
#ifdef DEBUG
	seq_printf(seq, "\n");
	printall(seq, conf);
#endif
}
static void print_raid5_conf(raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;

	printk("RAID5 conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- rd:%d wd:%d\n", conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(" disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}
	print_raid5_conf(conf);
	return 0;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->degraded <= conf->max_degraded) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_raid5_conf(conf);
	return err;
}
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int found = 0;
	int disk;
	struct disk_info *p;

	if (mddev->degraded > conf->max_degraded)
		/* no point adding a device */
		return 0;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = 0;
	for ( ; disk < conf->raid_disks; disk++)
		if ((p = conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			found = 1;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return found;
}
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	raid5_conf_t *conf = mddev_to_conf(mddev);

	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
	mddev->array_size = (sectors * (mddev->raid_disks - conf->max_degraded)) >> 1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = sectors/2;
	mddev->resync_max_sectors = sectors;
	return 0;
}
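
/*
 * Worked example for raid5_resize (illustrative numbers): with a 64KiB chunk
 * the mask rounds the per-device size down to a multiple of 128 sectors, so
 * 1000000 sectors becomes 999936.  On a 4-device RAID5 (3 data disks) the
 * exported array_size is then 999936 * 3 / 2 = 1499904 blocks of 1KiB.
 */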
#ifdef CONFIG_MD_RAID5_RESHAPE
static int raid5_check_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int err;

	if (mddev->delta_disks < 0 ||
	    mddev->new_level != mddev->level)
		return -EINVAL; /* Cannot shrink array or change level yet */
	if (mddev->delta_disks == 0)
		return 0; /* nothing to do */

	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
	    (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
		printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
		       (mddev->chunk_size / STRIPE_SIZE)*4);
		return -ENOSPC;
	}

	err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
	if (err)
		return err;

	if (mddev->degraded > conf->max_degraded)
		return -EINVAL;
	/* looks like we might be able to manage this */
	return 0;
}
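
/*
 * Example for the stripe_heads check above (assuming 4KiB STRIPE_SIZE): with
 * the default 256 stripe_heads, (chunk_size / 4KiB) * 4 must stay <= 256,
 * i.e. chunks up to 256KiB reshape without tuning.  For a 512KiB chunk the
 * check wants 512 stripe_heads, which the admin can provide by raising
 * stripe_cache_size before starting the reshape.
 */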
static int raid5_start_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *rdev;
	struct list_head *rtmp;
	int spares = 0;
	int added_devices = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	rdev_for_each(rdev, rtmp, mddev)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->expand_progress = 0;
	conf->expand_lo = 0;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 */
	rdev_for_each(rdev, rtmp, mddev)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			if (raid5_add_disk(mddev, rdev)) {
				char nm[20];
				set_bit(In_sync, &rdev->flags);
				added_devices++;
				rdev->recovery_offset = 0;
				sprintf(nm, "rd%d", rdev->raid_disk);
				if (sysfs_create_link(&mddev->kobj,
						      &rdev->kobj, nm))
					printk(KERN_WARNING
					       "raid5: failed to create "
					       "link %s for %s\n",
					       nm, mdname(mddev));
			} else
				break;
		}

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
	spin_unlock_irqrestore(&conf->device_lock, flags);
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"%s_reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->expand_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
#endif /* CONFIG_MD_RAID5_RESHAPE */
static void end_reshape(raid5_conf_t *conf)
{
	struct block_device *bdev;

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		conf->mddev->array_size = conf->mddev->size *
			(conf->raid_disks - conf->max_degraded);
		set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
		conf->mddev->changed = 1;

		bdev = bdget_disk(conf->mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)conf->mddev->array_size << 10);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
		spin_lock_irq(&conf->device_lock);
		conf->expand_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		conf->mddev->reshape_position = MaxSector;

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (data disks) * chunksize, where 'data disks' is the
		 * number of raid devices minus the parity devices.
		 */
		{
			int data_disks = conf->previous_raid_disks - conf->max_degraded;
			int stripe = data_disks *
				(conf->mddev->chunk_size / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
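
/*
 * Capacity example for end_reshape (illustrative): after growing a RAID5 set
 * from 4 to 5 devices (max_degraded 1), array_size goes from 3 * size to
 * 4 * size and the whole-disk block device's i_size is rewritten to match.
 * Any filesystem on top of the array still has to be grown separately.
 */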
static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
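
/*
 * Quiesce protocol (a summary of the cases above): state 1 raises
 * conf->quiesce and waits, under device_lock, until both active_stripes and
 * active_aligned_reads drain to zero; state 0 clears it and wakes the
 * waiters; state 2 only wakes wait_for_overlap to resume after a suspend,
 * without touching the quiesce flag.
 */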
static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};

static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};
static int __init raid5_init(void)
{
	int e;

	e = raid6_select_algo();
	if (e)
		return e;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");