/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 *    miss any bits.
 */
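/*
 * Worked example of the batching above (for illustration only; the
 * numbers are hypothetical): suppose seq_flush == 5 and seq_write == 4.
 * Batch 5 is closed to new additions but not yet on disk, so a stripe
 * with bm_seq == 5 must wait on conf->bitmap_list (see __release_stripe,
 * which queues while sh->bm_seq - conf->seq_write > 0).  A write arriving
 * now is tagged with bm_seq = seq_flush + 1 == 6.  Once batch 5 reaches
 * disk, seq_write advances to 5 and the queued stripe may proceed.
 */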
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * Stripe cache
 */
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
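/*
 * Illustration (assuming 4K pages and 8-byte pointers, i.e. a 64-bit
 * build): NR_HASH is 4096 / 8 = 512 buckets and HASH_MASK is 511.
 * STRIPE_SHIFT is 12 - 9 = 3, so stripe_hash() drops the 8-sector
 * offset within a stripe unit and then masks; e.g. sector 4096 hashes
 * to bucket (4096 >> 3) & 511 == 0.
 */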
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
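/*
 * Example use of r5_next_bio (a sketch for illustration; handle() is a
 * hypothetical stand-in for per-bio work): walking every bio queued
 * against one stripe+device, where dev->sector is the first sector of
 * the stripe unit:
 *
 *	struct bio *rbi;
 *	for (rbi = dev->toread; rbi; rbi = r5_next_bio(rbi, dev->sector))
 *		handle(rbi);
 *
 * The macro yields NULL at the first bio that extends to or past the end
 * of this stripe unit, so the walk never follows bi_next into a bio that
 * belongs to another device's range.
 */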
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
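/*
 * Encoding example (for illustration; the counts are hypothetical): a
 * bio spanning three stripes of which one has already been processed
 * stores bi_phys_segments == (1 << 16) | 3 == 0x10003 -- the low half
 * counts remaining active stripes, the high half completed ones.
 */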
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
 * We need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
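/*
 * Mapping example (for illustration; a hypothetical configuration):
 * a 5-device raid6 with the md layout, so syndrome_disks = 3,
 * pd_idx = 3 and qd_idx = 4.  Starting at raid6_d0() == 0, the walk
 * maps data disks 0,1,2 to slots 0,1,2, disk 3 (P) to slot 3 and
 * disk 4 (Q) to slot 4.
 */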
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}
static void print_raid5_conf(raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 sh->bm_seq - conf->seq_write > 0)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}
static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}
static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;


	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int has_failed(raid5_conf_t *conf)
{
	int degraded;
	int i;
	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (degraded > conf->max_degraded)
		return 1;
	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    /* ... */);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				       && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw & WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if ((rw & WRITE) &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset +=  len;
	}

	return tx;
}
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}
static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
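/*
 * Resulting layout (for illustration, using the hypothetical 5-device
 * md-layout raid6 from the raid6_idx_to_slot() example, syndrome_disks
 * = 3): srcs[0..2] hold the data pages in layout order, srcs[3] the P
 * page and srcs[4] the Q page, and the function returns 3 -- which is
 * why callers below pass count+2 buffers to async_gen_syndrome().
 */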
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}
static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; )
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}
#ifdef CONFIG_MULTICORE_RAID456
static void async_run_ops(void *param, async_cookie_t cookie)
{
	struct stripe_head *sh = param;
	unsigned long ops_request = sh->ops.request;

	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
	wake_up(&sh->ops.wait_for_ops);

	__raid_run_ops(sh, ops_request);
	release_stripe(sh);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	/* since handle_stripe can be called outside of raid5d context
	 * we need to ensure sh->ops.request is de-staged before another
	 * request arrives
	 */
	wait_event(sh->ops.wait_for_ops,
		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
	sh->ops.request = ops_request;

	atomic_inc(&sh->count);
	async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);
#ifdef CONFIG_MULTICORE_RAID456
	init_waitqueue_head(&sh->ops.wait_for_ops);
#endif

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
/*
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
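/*
 * Layout sketch (for illustration; num == 5 is a hypothetical array
 * size): the scribble region is 7 page pointers followed by 7
 * addr_conv_t slots.  to_addr_conv() above locates the conversion area
 * by skipping sizeof(struct page *) * (sh->disks + 2) bytes from the
 * start of the region.
 */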
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);
#ifdef CONFIG_MULTICORE_RAID456
		init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    /* ... */);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "md/raid:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded >= conf->max_degraded)
			printk_rl(KERN_WARNING
				  "md/raid:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			printk_rl(KERN_WARNING
				  "md/raid:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = mddev->private;
	pr_debug("raid456: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "md/raid:%s: Disk failure on %s, disabling device.\n"
		       "md/raid:%s: Operation continuing on %d devices.\n",
		       mdname(mddev),
		       bdevname(rdev->bdev, b),
		       mdname(mddev),
		       conf->raid_disks - mddev->degraded);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			BUG();
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the order
			 * of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
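/*
 * Worked example (for illustration; all parameters are hypothetical):
 * level 5, ALGORITHM_LEFT_SYMMETRIC, raid_disks = 4 (so data_disks = 3),
 * sectors_per_chunk = 128, r_sector = 1000.  sector_div() yields
 * chunk_offset = 104 and chunk_number = 7; dividing by data_disks gives
 * stripe = 2 with *dd_idx = 1.  Then pd_idx = 3 - (2 % 4) = 1 and
 * *dd_idx = (1 + 1 + 1) % 4 = 3, so the request lands on device 3 at
 * new_sector = 2 * 128 + 104 = 360, with parity on device 1.
 */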
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;


	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
		return 0;
	}
	return r_sector;
}
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(level == 6);
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
static void end_reshape(raid5_conf_t *conf);

static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}
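/*
 * Illustration of the index arithmetic above, assuming 64KiB chunks
 * (sectors_per_chunk = 128), 5 raid disks and max_degraded = 1: stripe
 * number 10 with chunk_offset 0 corresponds to logical array sector
 * 10 * (5 - 1) * 128 = 5120, which raid5_compute_sector() then translates
 * into the pd_idx/qd_idx layout recorded in the stripe_head.
 */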
static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
 */
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed &&
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1;
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				s->syncing);
		}
	}

	return 0;
}

/**
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
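/*
 * Note on the compute shortcut taken by fetch_block5(): when exactly one
 * block is missing (uptodate == disks - 1) it can be rebuilt without
 * touching the media, since in RAID-5 any single block is the XOR of all
 * the other data blocks and parity.  Only when that shortcut does not
 * apply is an R5_Wantread issued against an in-sync disk.
 */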
/* fetch_block6 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill6 to continue
 */
static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
			 struct r6_state *r6s, int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
				  &sh->dev[r6s->failed_num[1]] };

	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed >= 1 &&
	      (fdev[0]->toread || s->to_write)) ||
	     (s->failed >= 2 &&
	      (fdev[1]->toread || s->to_write)))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == r6s->failed_num[0] ||
				   disk_idx == r6s->failed_num[1]))) {
			/* have disk failed, and we're requested to fetch it;
			 * do compute it
			 */
			pr_debug("Computing stripe %llu block %d\n",
			       (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
					      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
			       (unsigned long long)sh->sector,
			       disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				disk_idx, s->syncing);
		}
	}

	return 0;
}

/**
 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
			int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block6(sh, s, r6s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
						sh->sector, STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
						0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s, int disks)
{
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
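/*
 * Rough cost model behind the rmw/rcw choice above, for a 5-disk array
 * where a write fully covers 2 of the 4 data blocks: read-modify-write
 * would have to pre-read the 2 target blocks plus parity (rmw = 3),
 * while reconstruct-write only has to pre-read the 2 untouched data
 * blocks (rcw = 2).  Since rcw <= rmw, the old data blocks are read,
 * and once rcw reaches 0 the write is started via
 * schedule_reconstruction().  A device that is not In_sync makes its
 * side cost 2*disks, effectively vetoing that strategy.
 */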
static void handle_stripe_dirtying6(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
{
	int rcw = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = sh->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* check if this block is missing for a reconstruct-write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
		    i != pd_idx && i != qd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			rcw++;
			if (!test_bit(R5_Insync, &dev->flags))
				continue; /* it's a failed drive */

			if (
			  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				pr_debug("Read_old stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				s->locked++;
			} else {
				pr_debug("Request delayed stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(STRIPE_DELAYED, &sh->state);
				set_bit(STRIPE_HANDLE, &sh->state);
			}
		}
	}
	/* now if nothing is locked, and if we have enough data, we can start a
	 * write request
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		schedule_reconstruction(sh, s, 1, 0);
	}
}
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
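/*
 * Shape of the check state machine above, as a sketch:
 *
 *   idle -> run (xor check queued) -> check_result
 *     parity ok ............. STRIPE_INSYNC
 *     mismatch, check-only ... count it, STRIPE_INSYNC
 *     mismatch, repair ....... compute_run -> compute_result -> idle,
 *                              then the recomputed parity block is
 *                              written out via R5_Wantwrite.
 */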
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  struct r6_state *r6s, int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == r6s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!r6s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[r6s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[r6s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
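/*
 * Why the three run states above exist: with no failed devices both
 * parities can be verified in one pass (check_state_run_pq); if the one
 * failed device holds Q, only P is meaningful to check (check_state_run,
 * which sacrifices the P buffer); and if a data disk failed, so P was
 * already consumed to rebuild it, only Q remains worth checking
 * (check_state_run_q).
 */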
static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
				struct r6_state *r6s)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			    test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    (!r6s || j != sh2->qd_idx) &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);
		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void handle_stripe5(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct bio *return_bi = NULL;
	struct stripe_head_state s;
	struct r5dev *dev;
	mdk_rdev_t *blocked_rdev = NULL;
	int prexor;
	int dec_preread_active = 0;

	memset(&s, 0, sizeof(s));
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
		 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
		 sh->reconstruct_state);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i = disks; i--; ) {
		mdk_rdev_t *rdev;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
			"written %p\n", i, dev->flags, dev->toread, dev->read,
			dev->towrite, dev->written);

		/* maybe we can request a biofill operation
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;

		if (test_bit(R5_Wantfill, &dev->flags))
			s.to_fill++;
		else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else {
			/* could be in-sync depending on recovery/reshape status */
			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
				set_bit(R5_Insync, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			s.failed++;
			s.failed_num = i;
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		s.locked, s.uptodate, s.to_read, s.to_write,
		s.failed, s.failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (s.failed > 1 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 1 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		s.syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if (s.written &&
	    ((test_bit(R5_Insync, &dev->flags) &&
	      !test_bit(R5_LOCKED, &dev->flags) &&
	      test_bit(R5_UPTODATE, &dev->flags)) ||
	     (s.failed == 1 && s.failed_num == sh->pd_idx)))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill5(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    (i == sh->pd_idx && s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying5(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks5(conf, sh, &s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (s.failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
		) {
		dev = &sh->dev[s.failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		}
	}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, NULL);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}
	return_io(return_bi);
}
static void handle_stripe6(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks;
	struct bio *return_bi = NULL;
	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
	struct stripe_head_state s;
	struct r6_state r6s;
	struct r5dev *dev, *pdev, *qdev;
	mdk_rdev_t *blocked_rdev = NULL;
	int dec_preread_active = 0;

	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
	       (unsigned long long)sh->sector, sh->state,
	       atomic_read(&sh->count), pd_idx, qd_idx,
	       sh->check_state, sh->reconstruct_state);
	memset(&s, 0, sizeof(s));

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i = disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) {
			s.compute++;
			BUG_ON(s.compute > 2);
		}

		if (test_bit(R5_Wantfill, &dev->flags)) {
			s.to_fill++;
		} else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else {
			/* in sync if before recovery_offset */
			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
				set_bit(R5_Insync, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s.failed < 2)
				r6s.failed_num[s.failed] = i;
			s.failed++;
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
	       " to_write=%d failed=%d failed_num=%d,%d\n",
	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
	       r6s.failed_num[0], r6s.failed_num[1]);
	/* check if the array has lost >2 devices and, if so, some requests
	 * might need to be failed
	 */
	if (s.failed > 2 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 2 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		s.syncing = 0;
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[pd_idx];
	r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
	qdev = &sh->dev[qd_idx];
	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == qd_idx);

	if (s.written &&
	    ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
	    ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && test_bit(R5_UPTODATE, &qdev->flags)))))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill6(sh, &s, &r6s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	if (sh->reconstruct_state == reconstruct_state_drain_result) {

		sh->reconstruct_state = reconstruct_state_idle;
		/* All the 'written' buffers and the parity blocks are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
			    (i == sh->pd_idx || i == qd_idx ||
			     dev->written)) {
				pr_debug("Writing block %d\n", i);
				BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
				set_bit(R5_Wantwrite, &dev->flags);
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == qd_idx) &&
				      s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks6(conf, sh, &s, &r6s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= 2 && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			dev = &sh->dev[r6s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		/* Need to write out all blocks after computing P&Q */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, &r6s);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}

	return_io(return_bi);
}
static void handle_stripe(struct stripe_head *sh)
{
	if (sh->raid_conf->level == 6)
		handle_stripe6(sh);
	else
		handle_stripe5(sh);
}
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	}
}
static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
int md_raid5_congested(mddev_t *mddev, int bits)
{
	raid5_conf_t *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);

static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
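/*
 * Example of the merge limit above: with 128-sector (64KiB) chunks, a
 * read positioned 120 sectors into a chunk with no pages attached yet
 * (bio_sectors == 0) gets max = (128 - 120) << 9 = 4096 bytes, so at
 * most one more page fits before the chunk boundary; anything larger
 * must take the regular, non chunk-aligned path.
 */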
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}

static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 16 bits)
		 */
		bi->bi_phys_segments = 1; /* biased count of active stripes */
	}

	return bi;
}
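/*
 * The bias in bi_phys_segments matters here: the field is split into an
 * active-stripe count (low 16 bits) and a processed-stripe count (high
 * 16 bits), and the active count starts at 1 so the bio is only
 * completed after the submitter drops its initial reference as well as
 * every per-stripe reference.  Resetting the field to 1 gives the
 * retried bio a fresh reference for the resubmission path.
 */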
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;

	bio_put(bi);

	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}
static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}
static int chunk_aligned_read(mddev_t *mddev, struct bio *raid_bio)
{
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	struct bio *align_bi;
	mdk_rdev_t *rdev;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *   original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
						   0,
						   &dd_idx, NULL);

	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev = rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

		if (!bio_fits_rdev(align_bi)) {
			/* too big in some way */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded.  In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o.  The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
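/*
 * Example of the bypass accounting above with the default
 * BYPASS_THRESHOLD of 1: each time a handle_list stripe (without
 * STRIPE_IO_STARTED set) is chosen while the same full-stripe write
 * still heads the hold_list, bypass_count grows by one; as soon as it
 * exceeds the threshold, or no full-stripe writes are pending at all,
 * the next selection is taken from the hold_list instead.
 */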
static int make_request(mddev_t *mddev, struct bio *bi)
{
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int remaining;
	int plugged;

	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bi);
		return 0;
	}

	md_write_start(mddev, bi);

	if (rw == READ &&
	    mddev->reshape_position == MaxSector &&
	    chunk_aligned_read(mddev, bi))
		return 0;

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	plugged = mddev_check_plugged(mddev);
	for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int disks, data_disks;
		int previous;

	retry:
		previous = 0;
		disks = conf->raid_disks;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			if (mddev->delta_disks < 0
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				disks = conf->previous_raid_disks;
				previous = 1;
			} else {
				if (mddev->delta_disks < 0
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}
		data_disks = disks - conf->max_degraded;

		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid456: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 * STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (mddev->delta_disks < 0
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					schedule();
					goto retry;
				}
			}

			if (bio_data_dir(bi) == WRITE &&
			    logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
				flush_signals(current);
				prepare_to_wait(&conf->wait_for_overlap,
						&w, TASK_INTERRUPTIBLE);
				if (logical_sector >= mddev->suspend_lo &&
				    logical_sector < mddev->suspend_hi)
					schedule();
				goto retry;
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.   Flush everything
				 * and wait a while
				 */
				md_wakeup_thread(mddev->thread);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if ((bi->bi_rw & REQ_SYNC) &&
			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}
	}
	if (!plugged)
		md_wakeup_thread(mddev->thread);

	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(bi);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0) {

		if (rw == WRITE)
			md_write_end(mddev);

		bio_endio(bi, 0);
	}

	return 0;
}
4081 static sector_t
raid5_size(mddev_t
*mddev
, sector_t sectors
, int raid_disks
);
4083 static sector_t
reshape_request(mddev_t
*mddev
, sector_t sector_nr
, int *skipped
)
4085 /* reshaping is quite different to recovery/resync so it is
4086 * handled quite separately ... here.
4088 * On each call to sync_request, we gather one chunk worth of
4089 * destination stripes and flag them as expanding.
4090 * Then we find all the source stripes and request reads.
4091 * As the reads complete, handle_stripe will copy the data
4092 * into the destination stripe and release that stripe.
4094 raid5_conf_t
*conf
= mddev
->private;
4095 struct stripe_head
*sh
;
4096 sector_t first_sector
, last_sector
;
4097 int raid_disks
= conf
->previous_raid_disks
;
4098 int data_disks
= raid_disks
- conf
->max_degraded
;
4099 int new_data_disks
= conf
->raid_disks
- conf
->max_degraded
;
4102 sector_t writepos
, readpos
, safepos
;
4103 sector_t stripe_addr
;
4104 int reshape_sectors
;
4105 struct list_head stripes
;
4107 if (sector_nr
== 0) {
4108 /* If restarting in the middle, skip the initial sectors */
4109 if (mddev
->delta_disks
< 0 &&
4110 conf
->reshape_progress
< raid5_size(mddev
, 0, 0)) {
4111 sector_nr
= raid5_size(mddev
, 0, 0)
4112 - conf
->reshape_progress
;
4113 } else if (mddev
->delta_disks
>= 0 &&
4114 conf
->reshape_progress
> 0)
4115 sector_nr
= conf
->reshape_progress
;
4116 sector_div(sector_nr
, new_data_disks
);
4118 mddev
->curr_resync_completed
= sector_nr
;
4119 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these.
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;
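	/* Illustrative arithmetic (made-up values, not from this file): if an
	 * old chunk of 512 sectors (256K) is being reshaped to a new chunk of
	 * 128 sectors (64K), reshape_sectors is 512, so one pass covers a
	 * whole chunk in both the old and the new geometry.
	 */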
	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}
	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 * been reshaped.
	 * If 'readpos' is behind 'writepos', then there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
	 */
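	/* Worked example with made-up numbers (not from the original code):
	 * growing a 4-device RAID5 to 5 devices gives data_disks = 3 and
	 * new_data_disks = 4.  With reshape_sectors = 128,
	 * reshape_progress = 1024 and reshape_safe = 768:
	 *   writepos = 1024/4 + 128 = 384
	 *   readpos  = 1024/3 - 128 = 213
	 *   safepos  =  768/3 - 128 = 128
	 * readpos is behind writepos, so only a user-space backup can cover
	 * a crash, and the superblock is then rewritten on the 10-second
	 * timer alone.
	 */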
	if ((mddev->delta_disks < 0
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}

	if (mddev->delta_disks < 0) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}
	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j = sh->disks; j--; ) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->delta_disks < 0)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += reshape_sectors;
	if ((sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	return reshape_sectors;
}
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	sector_t sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {

		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */
			continue;

		sh = get_active_stripe(conf, sector, 0, 1, 0);

		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		handle_stripe(sh);
		release_stripe(sh);
		handled++;
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}
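/* Note on the resume counter (illustrative numbers only): if a 256K aligned
 * read spans 64 stripes and only 40 stripe_heads can be obtained, the bio is
 * parked on conf->retry_read_aligned with ->bi_hw_segments set to 40, so the
 * next call simply skips the first 40 stripes and continues from there.
 */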
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev->private;
	int handled;
	struct blk_plug plug;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;

		if (atomic_read(&mddev->plug_cnt) == 0 &&
		    !list_empty(&conf->bitmap_list)) {
			/* Now is a good time to flush some bitmap updates */
			conf->seq_flush++;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = conf->seq_flush;
			activate_bit_delay(conf);
		}
		if (atomic_read(&mddev->plug_cnt) == 0)
			raid5_activate_delayed(conf);

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		sh = __get_priority_stripe(conf);

		if (!sh)
			break;
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);
		cond_resched();

		spin_lock_irq(&conf->device_lock);
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	blk_finish_plug(&plug);

	pr_debug("--- raid5d inactive\n");
}
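/* While mddev->plug_cnt is non-zero, the loop above defers both bitmap
 * flushes and the delayed-stripe list, so writes queued during a plug are
 * batched and submitted together once the last unplug drops the count to
 * zero (a summary of the checks above, not additional behaviour).
 */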
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

int
raid5_set_cache_size(mddev_t *mddev, int size)
{
	raid5_conf_t *conf = mddev->private;
	int err;

	if (size <= 16 || size > 32768)
		return -EINVAL;
	while (size < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (size > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	err = raid5_set_cache_size(mddev, new);
	if (err)
		return err;
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
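/* The attribute appears under sysfs; an illustrative session (device name
 * and values assumed):
 *
 *   # cat /sys/block/md0/md/stripe_cache_size
 *   256
 *   # echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * Each stripe_head pins one page per device, so raising the value on a
 * 6-drive array with 4K pages costs roughly (4096-256)*6*4K, about 90M of
 * unswappable memory (compare the 'memory' computation in setup_conf()).
 */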
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->bypass_threshold);
	else
		return 0;
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
static struct attribute *raid5_attrs[] =  {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	raid5_conf_t *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
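/* Example with assumed numbers: a 6-device RAID6 (max_degraded = 2) whose
 * members are 1000204 sectors with a 128-sector chunk first rounds each
 * device down to 1000192 sectors and then exports
 * 1000192 * (6 - 2) = 4000768 sectors of array capacity.
 */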
static void raid5_free_percpu(raid5_conf_t *conf)
{
	struct raid5_percpu *percpu;
	unsigned long cpu;

	if (!conf->percpu)
		return;

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		percpu = per_cpu_ptr(conf->percpu, cpu);
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
	}
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	free_percpu(conf->percpu);
}
static void free_conf(raid5_conf_t *conf)
{
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->disks);
	kfree(conf->stripe_hashtbl);
	kfree(conf);
}
#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
	long cpu = (long)hcpu;
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (conf->level == 6 && !percpu->spare_page)
			percpu->spare_page = alloc_page(GFP_KERNEL);
		if (!percpu->scribble)
			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

		if (!percpu->scribble ||
		    (conf->level == 6 && !percpu->spare_page)) {
			safe_put_page(percpu->spare_page);
			kfree(percpu->scribble);
			pr_err("%s: failed memory allocation for cpu%ld\n",
			       __func__, cpu);
			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
		percpu->spare_page = NULL;
		percpu->scribble = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif
static int raid5_alloc_percpu(raid5_conf_t *conf)
{
	unsigned long cpu;
	struct page *spare_page;
	struct raid5_percpu __percpu *allcpus;
	void *scribble;
	int err;

	allcpus = alloc_percpu(struct raid5_percpu);
	if (!allcpus)
		return -ENOMEM;
	conf->percpu = allcpus;

	get_online_cpus();
	err = 0;
	for_each_present_cpu(cpu) {
		if (conf->level == 6) {
			spare_page = alloc_page(GFP_KERNEL);
			if (!spare_page) {
				err = -ENOMEM;
				break;
			}
			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
		}
		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
		if (!scribble) {
			err = -ENOMEM;
			break;
		}
		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
	}
#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	if (err == 0)
		err = register_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	return err;
}
static raid5_conf_t *setup_conf(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory, max_disks;
	mdk_rdev_t *rdev;
	struct disk_info *disk;

	if (mddev->new_level != 5
	    && mddev->new_level != 4
	    && mddev->new_level != 6) {
		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->new_level);
		return ERR_PTR(-EIO);
	}
	if ((mddev->new_level == 5
	     && !algorithm_valid_raid5(mddev->new_layout)) ||
	    (mddev->new_level == 6
	     && !algorithm_valid_raid6(mddev->new_layout))) {
		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
		       mdname(mddev), mddev->new_layout);
		return ERR_PTR(-EIO);
	}
	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
		       mdname(mddev), mddev->raid_disks);
		return ERR_PTR(-EINVAL);
	}

	if (!mddev->new_chunk_sectors ||
	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
		       mdname(mddev), mddev->new_chunk_sectors << 9);
		return ERR_PTR(-EINVAL);
	}

	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
	if (conf == NULL)
		goto abort;
	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;

	conf->raid_disks = mddev->raid_disks;
	if (mddev->reshape_position == MaxSector)
		conf->previous_raid_disks = mddev->raid_disks;
	else
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
	conf->scribble_len = scribble_len(max_disks);

	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	conf->level = mddev->new_level;
	if (raid5_alloc_percpu(conf) != 0)
		goto abort;

	pr_debug("raid456: run(%s) called.\n", mdname(mddev));

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= max_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
			       " disk %d\n",
			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
		} else if (rdev->saved_raid_disk != raid_disk)
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}

	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->level = mddev->new_level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->new_layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->reshape_progress = mddev->reshape_position;
	if (conf->reshape_progress != MaxSector) {
		conf->prev_chunk_sectors = mddev->chunk_sectors;
		conf->prev_algo = mddev->layout;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
		       mdname(mddev), memory);
		goto abort;
	} else
		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
		       mdname(mddev), memory);

	conf->thread = md_register_thread(raid5d, mddev, NULL);
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate thread.\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		free_conf(conf);
		return ERR_PTR(-EIO);
	} else
		return ERR_PTR(-ENOMEM);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
	}
	return 0;
}
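/* Example: with ALGORITHM_PARITY_0 on a 5-disk RAID5 (max_degraded = 1),
 * only_parity(0, ...) returns 1 because slot 0 holds nothing but parity;
 * run() below uses this to treat a not-yet-synced parity-only disk as
 * harmless when deciding whether a dirty array can still be started.
 */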
static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int working_disks = 0;
	int dirty_parity_disks = 0;
	mdk_rdev_t *rdev;
	sector_t reshape_offset = 0;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Currently only disks can change, it must
		 * increase, and we must be past the point where
		 * a stripe over-writes itself
		 */
		sector_t here_new, here_old;
		int old_disks;
		int max_degraded = (mddev->level == 6 ? 2 : 1);

		if (mddev->new_level != mddev->level) {
			printk(KERN_ERR "md/raid:%s: unsupported reshape "
			       "required - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 */
		here_new = mddev->reshape_position;
		if (sector_div(here_new, mddev->new_chunk_sectors *
			       (mddev->raid_disks - max_degraded))) {
			printk(KERN_ERR "md/raid:%s: reshape_position not "
			       "on a stripe boundary\n", mdname(mddev));
			return -EINVAL;
		}
		reshape_offset = here_new * mddev->new_chunk_sectors;
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, mddev->chunk_sectors *
			   (old_disks-max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
		if (mddev->delta_disks == 0) {
			/* We cannot be sure it is safe to start an in-place
			 * reshape.  It is only safe if user-space is monitoring
			 * and taking constant backups.
			 * mdadm always starts a situation like this in
			 * readonly mode so it can take control before
			 * allowing any writes.  So just check for that.
			 */
			if ((here_new * mddev->new_chunk_sectors !=
			     here_old * mddev->chunk_sectors) ||
			    mddev->ro == 0) {
				printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
				       " in read-only mode - aborting\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else if (mddev->delta_disks < 0
			   ? (here_new * mddev->new_chunk_sectors <=
			      here_old * mddev->chunk_sectors)
			   : (here_new * mddev->new_chunk_sectors >=
			      here_old * mddev->chunk_sectors)) {
			/* Reading from the same stripe as writing to - bad */
			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
			       "auto-recovery - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
		       mdname(mddev));
		/* OK, we should be able to continue; */
	} else {
		BUG_ON(mddev->level != mddev->new_level);
		BUG_ON(mddev->layout != mddev->new_layout);
		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
		BUG_ON(mddev->delta_disks != 0);
	}

	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk < 0)
			continue;
		if (test_bit(In_sync, &rdev->flags)) {
			working_disks++;
			continue;
		}
		/* This disc is not fully in-sync.  However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
		/* Hack because v0.91 doesn't store recovery_offset properly. */
		if (mddev->major_version == 0 &&
		    mddev->minor_version > 90)
			rdev->recovery_offset = reshape_offset;

		if (rdev->recovery_offset < reshape_offset) {
			/* We need to check old and new layout */
			if (!only_parity(rdev->raid_disk,
					 conf->algorithm,
					 conf->raid_disks,
					 conf->max_degraded))
				continue;
		}
		if (!only_parity(rdev->raid_disk,
				 conf->prev_algo,
				 conf->previous_raid_disks,
				 conf->max_degraded))
			continue;
		dirty_parity_disks++;
	}

	mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
			   - working_disks);

	if (has_failed(conf)) {
		printk(KERN_ERR "md/raid:%s: not enough operational devices"
		       " (%d/%d failed)\n",
		       mdname(mddev), mddev->degraded, conf->raid_disks);
		goto abort;
	}

	/* device size must be a multiple of chunk size */
	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
	mddev->resync_max_sectors = mddev->dev_sectors;

	if (mddev->degraded > dirty_parity_disks &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "md/raid:%s: starting dirty degraded array"
			       " - data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "md/raid:%s: cannot start dirty degraded array.\n",
			       mdname(mddev));
			goto abort;
		}
	}

	if (mddev->degraded == 0)
		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
		       " devices, algorithm %d\n", mdname(mddev), conf->level,
		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
		       mddev->new_layout);
	else
		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
		       " out of %d devices, algorithm %d\n",
		       mdname(mddev), conf->level,
		       mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, mddev->new_layout);

	print_raid5_conf(conf);

	if (conf->reshape_progress != MaxSector) {
		conf->reshape_safe = conf->reshape_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"reshape");
	}

	/* Ok, everything is just fine now */
	if (mddev->to_remove == &raid5_attrs_group)
		mddev->to_remove = NULL;
	else if (mddev->kobj.sd &&
		 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));
	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

	if (mddev->queue) {
		int chunk_size;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (number of data disks) * chunksize
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;

		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);

		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = raid5_congested;

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));

		list_for_each_entry(rdev, &mddev->disks, same_set)
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
	}

	return 0;
abort:
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	if (conf) {
		print_raid5_conf(conf);
		free_conf(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
	return -EIO;
}
static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	if (mddev->queue)
		mddev->queue->backing_dev_info.congested_fn = NULL;
	free_conf(conf);
	mddev->private = NULL;
	mddev->to_remove = &raid5_attrs_group;
	return 0;
}
#ifdef DEBUG
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
	int i;

	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
	seq_printf(seq, "sh %llu, count %d.\n",
		   (unsigned long long)sh->sector, atomic_read(&sh->count));
	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->disks; i++) {
		seq_printf(seq, "(cache%d: %p %ld) ",
			   i, sh->dev[i].page, sh->dev[i].flags);
	}
	seq_printf(seq, "\n");
}

static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
				continue;
			print_sh(seq, sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}
#endif
static void status(struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_sectors / 2, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
#ifdef DEBUG
	seq_printf(seq, "\n");
	printall(seq, conf);
#endif
}
static void print_raid5_conf(raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;

	printk(KERN_DEBUG "RAID conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
	       conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;
	int count = 0;
	unsigned long flags;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && tmp->rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);
	print_raid5_conf(conf);
	return count;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (number >= conf->raid_disks &&
		    conf->reshape_progress == MaxSector)
			clear_bit(In_sync, &rdev->flags);

		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * isn't possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    !has_failed(conf) &&
		    number < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:
	print_raid5_conf(conf);
	return err;
}
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (has_failed(conf))
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = first;
	for ( ; disk <= last; disk++)
		if ((p = conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return err;
}
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	md_set_array_sectors(mddev, raid5_size(mddev, sectors,
					       mddev->raid_disks));
	if (mddev->array_sectors >
	    raid5_size(mddev, sectors, mddev->raid_disks))
		return -EINVAL;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int check_stripe_cache(mddev_t *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	raid5_conf_t *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
		       mdname(mddev),
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE) * 4);
		return 0;
	}
	return 1;
}
static int check_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;

	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (mddev->bitmap)
		/* Cannot grow a bitmap yet */
		return -EBUSY;
	if (has_failed(conf))
		return -EINVAL;
	if (mddev->delta_disks < 0) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum.
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}
static int raid5_start_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	mdk_rdev_t *rdev;
	int spares = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;

	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md/raid:%s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));
		return -EINVAL;
	}

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	if (mddev->delta_disks < 0)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	conf->generation++;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "partially reconstructed" state of
	 * such devices during the reshape and confusion could result.
	 */
	if (mddev->delta_disks >= 0) {
		int added_devices = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					char nm[20];
					if (rdev->raid_disk
					    >= conf->previous_raid_disks) {
						set_bit(In_sync, &rdev->flags);
						added_devices++;
					} else
						rdev->recovery_offset = 0;
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
				added_devices++;
			}

		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post number of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
			- added_devices;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(raid5_conf_t *conf)
{

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (number of data disks) * chunksize
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			mddev->degraded = conf->raid_disks;
			for (d = 0; d < conf->raid_disks; d++)
				if (conf->disks[d].rdev &&
				    test_bit(In_sync,
					     &conf->disks[d].rdev->flags))
					mddev->degraded--;
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				mdk_rdev_t *rdev = conf->disks[d].rdev;
				if (rdev && raid5_remove_disk(mddev, d) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
				}
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
	}
}
static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev->private;

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
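/* State machine summary (restating the cases above): state 1 raises
 * conf->quiesce to 2 while active stripes and aligned reads drain, then
 * holds it at 1 so new writes stay blocked; state 0 re-enables writes;
 * state 2 only kicks waiters so a paused resync/reshape can make progress
 * while the array as a whole remains suspended.
 */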
static void *raid45_takeover_raid0(mddev_t *mddev, int level)
{
	struct raid0_private_data *raid0_priv = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_priv->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_priv->strip_zone[0].zone_end;
	sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
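/* Sketch of the conversion: an N-drive raid0 is re-described as an
 * (N+1)-drive raid4/5 in the parity-last (ALGORITHM_PARITY_N) layout with
 * the parity device absent, so the data blocks already sit where the new
 * personality expects them and only parity needs to be reconstructed.
 */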
static void *raid5_takeover_raid1(mddev_t *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
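/* Example with an assumed size: for a raid1 of 1000 sectors the loop halves
 * chunksect from 128 until it divides the array; 1000 & 7 == 0, so the
 * result is an 8-sector (4K) chunk, which still satisfies the
 * (chunksect<<9) >= STRIPE_SIZE check on 4K-page systems.
 */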
static void *raid5_takeover_raid6(mddev_t *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
static int raid5_check_reshape(mddev_t *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	raid5_conf_t *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}
static int raid6_check_reshape(mddev_t *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}
static void *raid5_takeover(mddev_t *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}
static void *raid4_takeover(mddev_t *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}
static struct mdk_personality raid5_personality;

static void *raid6_takeover(mddev_t *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};
static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");