/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * lose the bit.
 */
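/*
 * Concretely, this is how the two counters are compared when a stripe is
 * released (see __release_stripe() below); repeated here only to make the
 * batching rule above easier to follow:
 *
 *	if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 *	    sh->bm_seq - conf->seq_write > 0)
 *		list_add_tail(&sh->lru, &conf->bitmap_list);  // batch not on disk yet
 *	else
 *		list_add_tail(&sh->lru, &conf->handle_list);  // safe to handle now
 */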
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
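/*
 * Minimal usage sketch (illustrative, not part of the driver): walking all
 * bios queued against one stripe+device, where 'dev' stands for a
 * struct r5dev:
 *
 *	struct bio *b;
 *	for (b = dev->toread;
 *	     b && b->bi_sector < dev->sector + STRIPE_SECTORS;
 *	     b = r5_next_bio(b, dev->sector))
 *		;	// each 'b' here overlaps this stripe+device
 */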
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
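/*
 * Worked example (illustrative values): with bi_phys_segments == 0x00030002,
 * raid5_bi_phys_segments() returns 2 (the biased active-stripe count) and
 * raid5_bi_hw_segments() returns 3 (stripes already processed); each helper
 * above only touches its own 16-bit half of the field.
 */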
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * We need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This help does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
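/*
 * Example (illustrative): on a 6-device md-layout RAID-6 stripe with
 * pd_idx == 4 and qd_idx == 5, walking from raid6_d0() maps the four data
 * disks to slots 0..3, P to slot 4 (syndrome_disks) and Q to slot 5
 * (syndrome_disks + 1), which is the ordering async_gen_syndrome() expects.
 */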
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	while (bi) {
		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 sh->bm_seq - conf->seq_write > 0)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}

static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int has_failed(raid5_conf_t *conf)
{
	int degraded;
	int i;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in_sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (degraded > conf->max_degraded)
		return 1;
	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
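/*
 * Example (assumed numbers, not from the source): growing a 4-disk RAID-5
 * (max_degraded == 1) to 5 disks with one Faulty old member: the first loop
 * counts one degraded device among the previous 4, and the second loop does
 * not count the not-yet-recovered new slot because raid_disks >
 * previous_raid_disks, so has_failed() still returns 0.
 */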
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    );
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				       && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw & WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if ((rw & WRITE) &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					   &rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bvl->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl->bv_offset;
			bio_page = bvl->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
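/*
 * Worked example (illustrative numbers): a bio starting two sectors before
 * this stripe's 'sector' gives page_offset = -1024, so the first bio_vec is
 * entered with b_offset = 1024, page_offset becomes 0, and only the part of
 * the segment that falls inside the STRIPE_SIZE page is copied.
 */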
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
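/*
 * Usage sketch (illustrative): for a 6-device md-layout stripe this returns
 * 4, with srcs[0..3] holding the data pages in syndrome order, srcs[4] = P
 * and srcs[5] = Q, so callers such as ops_run_reconstruct6() follow up with
 *
 *	count = set_syndrome_sources(blocks, sh);
 *	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 */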
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; )
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}

#ifdef CONFIG_MULTICORE_RAID456
static void async_run_ops(void *param, async_cookie_t cookie)
{
	struct stripe_head *sh = param;
	unsigned long ops_request = sh->ops.request;

	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
	wake_up(&sh->ops.wait_for_ops);

	__raid_run_ops(sh, ops_request);
	release_stripe(sh);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	/* since handle_stripe can be called outside of raid5d context
	 * we need to ensure sh->ops.request is de-staged before another
	 * request arrives
	 */
	wait_event(sh->ops.wait_for_ops,
		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
	sh->ops.request = ops_request;

	atomic_inc(&sh->count);
	async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);
	#ifdef CONFIG_MULTICORE_RAID456
	init_waitqueue_head(&sh->ops.wait_for_ops);
	#endif

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

/*
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
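/*
 * Worked example (illustrative): for num == 8 devices, scribble_len()
 * reserves 10 struct page pointers plus 10 addr_conv_t entries; the page
 * pointers form the source/destination list handed to the async_tx calls
 * and the addr_conv_t area is what to_addr_conv() points at.
 */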
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);
		#ifdef CONFIG_MULTICORE_RAID456
		init_waitqueue_head(&nsh->ops.wait_for_ops);
		#endif

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    );
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}

static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "md/raid:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded >= conf->max_degraded)
			printk_rl(KERN_WARNING
				  "md/raid:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			printk_rl(KERN_WARNING
				  "md/raid:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = mddev->private;
	pr_debug("raid456: error called\n");

	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery was running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid:%s: Disk failure on %s, disabling device.\n"
	       "md/raid:%s: Operation continuing on %d devices.\n",
	       mdname(mddev),
	       bdevname(rdev->bdev, b),
	       mdname(mddev),
	       conf->raid_disks - mddev->degraded);
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			BUG();
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but order
			 * of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
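/*
 * Worked example (illustrative numbers): RAID-5, left-symmetric, 5 devices
 * (data_disks = 4), 64-sector chunks, r_sector = 1000:
 * chunk_offset = 40, chunk_number = 15, stripe = 3, *dd_idx = 3,
 * pd_idx = 4 - (3 % 5) = 1, *dd_idx becomes (1 + 1 + 3) % 5 = 0 and
 * new_sector = 3 * 64 + 40 = 232.
 */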
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
		return 0;
	}
	return r_sector;
}
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
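/*
 * Example (illustrative): with STRIPE_SECTORS == 8, two queued writes
 * covering sectors [0,4) and [4,8) of this stripe+device make the coverage
 * check in add_stripe_bio() above advance 'sector' to dev->sector + 8, so
 * R5_OVERWRITE is set and the block can later be treated as fully
 * overwritten (no pre-read needed).
 */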
static void end_reshape(raid5_conf_t *conf);

static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}
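
/* handle_failed_stripe - the array has lost more devices than it can
 * tolerate, so every pending read and write on this stripe is failed:
 * the bios are completed with BIO_UPTODATE cleared and handed back to
 * the caller via return_bi.
 */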
static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
 */
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed &&
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1;
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				s->syncing);
		}
	}

	return 0;
}
/**
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
				break;

	set_bit(STRIPE_HANDLE, &sh->state);
}
/* fetch_block6 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill6 to continue
 */
static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
			 struct r6_state *r6s, int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
				  &sh->dev[r6s->failed_num[1]] };

	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed >= 1 &&
	      (fdev[0]->toread || s->to_write)) ||
	     (s->failed >= 2 &&
	      (fdev[1]->toread || s->to_write)))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == r6s->failed_num[0] ||
				   disk_idx == r6s->failed_num[1]))) {
			/* have disk failed, and we're requested to fetch it;
			 * do compute it
			 */
			pr_debug("Computing stripe %llu block %d\n",
			       (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
				      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
			       (unsigned long long)sh->sector,
			       disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				disk_idx, s->syncing);
		}
	}

	return 0;
}
/**
 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
			int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block6(sh, s, r6s, i, disks))
				break;

	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
						sh->sector, STRIPE_SECTORS,
						!test_bit(STRIPE_DEGRADED, &sh->state),
						0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
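
/* handle_stripe_dirtying5 - a write is pending: count how many blocks a
 * read-modify-write (rmw) would have to read versus a reconstruct-write
 * (rcw) and schedule pre-reads for the cheaper of the two.  As a rough
 * example, on a 5-drive array updating a single data block, rmw needs the
 * old data block plus parity (rmw counts 2) while rcw needs the other
 * three data blocks (rcw counts 3), so rmw wins; a full-stripe write
 * drives rcw to zero and is scheduled with no pre-reads at all.
 */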
static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s, int disks)
{
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		    test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			    test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			    test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
static void handle_stripe_dirtying6(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
{
	int rcw = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = sh->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* check if we don't have enough data */
		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
		    i != pd_idx && i != qd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			rcw++;
			if (!test_bit(R5_Insync, &dev->flags))
				continue; /* it's a failed drive */

			if (
			  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				pr_debug("Read_old stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				s->locked++;
			} else {
				pr_debug("Request delayed stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(STRIPE_DELAYED, &sh->state);
				set_bit(STRIPE_HANDLE, &sh->state);
			}
		}
	}
	/* now if nothing is locked, and if we have enough data, we can start a
	 * write request
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		schedule_reconstruction(sh, s, 1, 0);
	}
}
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  struct r6_state *r6s, int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == r6s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!r6s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[r6s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[r6s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
				struct r6_state *r6s)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    (!r6s || j != sh2->qd_idx) &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);

		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 */
static void handle_stripe5(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct bio *return_bi = NULL;
	struct stripe_head_state s;
	struct r5dev *dev;
	mdk_rdev_t *blocked_rdev = NULL;
	int prexor;
	int dec_preread_active = 0;

	memset(&s, 0, sizeof(s));
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
		 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
		 sh->reconstruct_state);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
			"written %p\n", i, dev->flags, dev->toread, dev->read,
			dev->towrite, dev->written);

		/* maybe we can request a biofill operation
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;

		if (test_bit(R5_Wantfill, &dev->flags))
			s.to_fill++;
		else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else if (!test_bit(Faulty, &rdev->flags)) {
			/* could be in-sync depending on recovery/reshape status */
			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
				set_bit(R5_Insync, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			s.failed++;
			s.failed_num = i;
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		s.locked, s.uptodate, s.to_read, s.to_write,
		s.failed, s.failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (s.failed > 1) {
		sh->check_state = 0;
		sh->reconstruct_state = 0;
		if (s.to_read+s.to_write+s.written)
			handle_failed_stripe(conf, sh, &s, disks, &return_bi);
		if (s.syncing) {
			md_done_sync(conf->mddev, STRIPE_SECTORS,0);
			clear_bit(STRIPE_SYNCING, &sh->state);
			s.syncing = 0;
		}
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if (s.written &&
	    ((test_bit(R5_Insync, &dev->flags) &&
	      !test_bit(R5_LOCKED, &dev->flags) &&
	      test_bit(R5_UPTODATE, &dev->flags)) ||
	     (s.failed == 1 && s.failed_num == sh->pd_idx)))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill5(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    (i == sh->pd_idx && s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying5(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks5(conf, sh, &s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (s.failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
		) {
		dev = &sh->dev[s.failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		}
	}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, NULL);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	if (dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}
	return_io(return_bi);
}
static void handle_stripe6(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks;
	struct bio *return_bi = NULL;
	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
	struct stripe_head_state s;
	struct r6_state r6s;
	struct r5dev *dev, *pdev, *qdev;
	mdk_rdev_t *blocked_rdev = NULL;
	int dec_preread_active = 0;

	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
	       (unsigned long long)sh->sector, sh->state,
	       atomic_read(&sh->count), pd_idx, qd_idx,
	       sh->check_state, sh->reconstruct_state);
	memset(&s, 0, sizeof(s));

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) {
			s.compute++;
			BUG_ON(s.compute > 2);
		}

		if (test_bit(R5_Wantfill, &dev->flags)) {
			s.to_fill++;
		} else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else if (!test_bit(Faulty, &rdev->flags)) {
			/* in sync if before recovery_offset */
			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
				set_bit(R5_Insync, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s.failed < 2)
				r6s.failed_num[s.failed] = i;
			s.failed++;
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
	       " to_write=%d failed=%d failed_num=%d,%d\n",
	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
	       r6s.failed_num[0], r6s.failed_num[1]);
	/* check if the array has lost >2 devices and, if so, some requests
	 * might need to be failed
	 */
	if (s.failed > 2) {
		sh->check_state = 0;
		sh->reconstruct_state = 0;
		if (s.to_read+s.to_write+s.written)
			handle_failed_stripe(conf, sh, &s, disks, &return_bi);
		if (s.syncing) {
			md_done_sync(conf->mddev, STRIPE_SECTORS,0);
			clear_bit(STRIPE_SYNCING, &sh->state);
			s.syncing = 0;
		}
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[pd_idx];
	r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
	qdev = &sh->dev[qd_idx];
	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == qd_idx);

	if (s.written &&
	    ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
	    ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && test_bit(R5_UPTODATE, &qdev->flags)))))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill6(sh, &s, &r6s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	if (sh->reconstruct_state == reconstruct_state_drain_result) {

		sh->reconstruct_state = reconstruct_state_idle;
		/* All the 'written' buffers and the parity blocks are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
			    (i == sh->pd_idx || i == qd_idx ||
			     dev->written)) {
				pr_debug("Writing block %d\n", i);
				BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
				set_bit(R5_Wantwrite, &dev->flags);
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == qd_idx) &&
				      s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks6(conf, sh, &s, &r6s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= 2 && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			dev = &sh->dev[r6s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		/* Need to write out all blocks after computing P&Q */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, &r6s);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	if (dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}
	return_io(return_bi);
}
static void handle_stripe(struct stripe_head *sh)
{
	if (sh->raid_conf->level == 6)
		handle_stripe6(sh);
	else
		handle_stripe5(sh);
}
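
/* raid5_activate_delayed - once preread activity has dropped below
 * IO_THRESHOLD, move stripes that were parked on the delayed list onto
 * the hold list so the raid5 daemon can process them.
 */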
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	}
}
static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
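
/* md_raid5_congested - report the stripe cache as congested when new
 * allocations are blocked or no inactive stripes are left, regardless of
 * whether the query is for reads or writes.
 */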
int md_raid5_congested(mddev_t *mddev, int bits)
{
	raid5_conf_t *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (conf->inactive_blocked)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);

static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
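
/* in_chunk_boundary() below is true only when the whole bio fits inside a
 * single chunk.  For example, with 1024-sector chunks an 8-sector read
 * starting 4 sectors before a chunk boundary gives (1020 + 8) > 1024 and
 * is therefore not eligible for the aligned-read fast path.
 */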
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 8 bits)
		 */
		bi->bi_phys_segments = 1; /* biased count of active stripes */
	}

	return bi;
}
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio* raid_bi  = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;

	bio_put(bi);

	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}
static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}
static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
{
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	struct bio* align_bi;
	mdk_rdev_t *rdev;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *     original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
						    0,
						    &dd_idx, NULL);

	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev =  rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

		if (!bio_fits_rdev(align_bi)) {
			/* too big in some way */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded.  In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o.  The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
static int make_request(mddev_t *mddev, struct bio * bi)
{
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int remaining;
	int plugged;

	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bi);
		return 0;
	}

	md_write_start(mddev, bi);

	if (rw == READ &&
	     mddev->reshape_position == MaxSector &&
	     chunk_aligned_read(mddev,bi))
		return 0;

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	plugged = mddev_check_plugged(mddev);
	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int disks, data_disks;
		int previous;

	retry:
		previous = 0;
		disks = conf->raid_disks;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			if (mddev->delta_disks < 0
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				disks = conf->previous_raid_disks;
				previous = 1;
			} else {
				if (mddev->delta_disks < 0
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}
		data_disks = disks - conf->max_degraded;

		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid456: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 *  STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (mddev->delta_disks < 0
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					schedule();
					goto retry;
				}
			}

			if (bio_data_dir(bi) == WRITE &&
			    logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
				flush_signals(current);
				prepare_to_wait(&conf->wait_for_overlap,
						&w, TASK_INTERRUPTIBLE);
				if (logical_sector >= mddev->suspend_lo &&
				    logical_sector < mddev->suspend_hi)
					schedule();
				goto retry;
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				md_wakeup_thread(mddev->thread);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if ((bi->bi_rw & REQ_SYNC) &&
			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}
	}
	if (!plugged)
		md_wakeup_thread(mddev->thread);

	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(bi);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0) {

		if ( rw == WRITE )
			md_write_end(mddev);

		bio_endio(bi, 0);
	}

	return 0;
}
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);

static sector_t
reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	raid5_conf_t *conf = mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->delta_disks < 0 &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (mddev->delta_disks >= 0 &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			*skipped = 1;
			return sector_nr;
		}
	}

	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}
4164 /* 'writepos' is the most advanced device address we might write.
4165 * 'readpos' is the least advanced device address we might read.
4166 * 'safepos' is the least address recorded in the metadata as having
4168 * If 'readpos' is behind 'writepos', then there is no way that we can
4169 * ensure safety in the face of a crash - that must be done by userspace
4170 * making a backup of the data. So in that case there is no particular
4171 * rush to update metadata.
4172 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4173 * update the metadata to advance 'safepos' to match 'readpos' so that
4174 * we can be safe in the event of a crash.
4175 * So we insist on updating metadata if safepos is behind writepos and
4176 * readpos is beyond writepos.
4177 * In any case, update the metadata every 10 seconds.
4178 * Maybe that number should be configurable, but I'm not sure it is
4179 * worth it.... maybe it could be a multiple of safemode_delay???
4181 if ((mddev
->delta_disks
< 0
4182 ? (safepos
> writepos
&& readpos
< writepos
)
4183 : (safepos
< writepos
&& readpos
> writepos
)) ||
4184 time_after(jiffies
, conf
->reshape_checkpoint
+ 10*HZ
)) {
4185 /* Cannot proceed until we've updated the superblock... */
4186 wait_event(conf
->wait_for_overlap
,
4187 atomic_read(&conf
->reshape_stripes
)==0);
4188 mddev
->reshape_position
= conf
->reshape_progress
;
4189 mddev
->curr_resync_completed
= sector_nr
;
4190 conf
->reshape_checkpoint
= jiffies
;
4191 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
4192 md_wakeup_thread(mddev
->thread
);
4193 wait_event(mddev
->sb_wait
, mddev
->flags
== 0 ||
4194 kthread_should_stop());
4195 spin_lock_irq(&conf
->device_lock
);
4196 conf
->reshape_safe
= mddev
->reshape_position
;
4197 spin_unlock_irq(&conf
->device_lock
);
4198 wake_up(&conf
->wait_for_overlap
);
4199 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
4202 if (mddev
->delta_disks
< 0) {
4203 BUG_ON(conf
->reshape_progress
== 0);
4204 stripe_addr
= writepos
;
4205 BUG_ON((mddev
->dev_sectors
&
4206 ~((sector_t
)reshape_sectors
- 1))
4207 - reshape_sectors
- stripe_addr
4210 BUG_ON(writepos
!= sector_nr
+ reshape_sectors
);
4211 stripe_addr
= sector_nr
;
4213 INIT_LIST_HEAD(&stripes
);
4214 for (i
= 0; i
< reshape_sectors
; i
+= STRIPE_SECTORS
) {
4216 int skipped_disk
= 0;
4217 sh
= get_active_stripe(conf
, stripe_addr
+i
, 0, 0, 1);
4218 set_bit(STRIPE_EXPANDING
, &sh
->state
);
4219 atomic_inc(&conf
->reshape_stripes
);
4220 /* If any of this stripe is beyond the end of the old
4221 * array, then we need to zero those blocks
4223 for (j
=sh
->disks
; j
--;) {
4225 if (j
== sh
->pd_idx
)
4227 if (conf
->level
== 6 &&
4230 s
= compute_blocknr(sh
, j
, 0);
4231 if (s
< raid5_size(mddev
, 0, 0)) {
4235 memset(page_address(sh
->dev
[j
].page
), 0, STRIPE_SIZE
);
4236 set_bit(R5_Expanded
, &sh
->dev
[j
].flags
);
4237 set_bit(R5_UPTODATE
, &sh
->dev
[j
].flags
);
4239 if (!skipped_disk
) {
4240 set_bit(STRIPE_EXPAND_READY
, &sh
->state
);
4241 set_bit(STRIPE_HANDLE
, &sh
->state
);
4243 list_add(&sh
->lru
, &stripes
);
4245 spin_lock_irq(&conf
->device_lock
);
4246 if (mddev
->delta_disks
< 0)
4247 conf
->reshape_progress
-= reshape_sectors
* new_data_disks
;
4249 conf
->reshape_progress
+= reshape_sectors
* new_data_disks
;
4250 spin_unlock_irq(&conf
->device_lock
);
4251 /* Ok, those stripe are ready. We can start scheduling
4252 * reads on the source stripes.
4253 * The source stripes are determined by mapping the first and last
4254 * block on the destination stripes.
4257 raid5_compute_sector(conf
, stripe_addr
*(new_data_disks
),
4260 raid5_compute_sector(conf
, ((stripe_addr
+reshape_sectors
)
4261 * new_data_disks
- 1),
4263 if (last_sector
>= mddev
->dev_sectors
)
4264 last_sector
= mddev
->dev_sectors
- 1;
4265 while (first_sector
<= last_sector
) {
4266 sh
= get_active_stripe(conf
, first_sector
, 1, 0, 1);
4267 set_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
4268 set_bit(STRIPE_HANDLE
, &sh
->state
);
4270 first_sector
+= STRIPE_SECTORS
;
4272 /* Now that the sources are clearly marked, we can release
4273 * the destination stripes
4275 while (!list_empty(&stripes
)) {
4276 sh
= list_entry(stripes
.next
, struct stripe_head
, lru
);
4277 list_del_init(&sh
->lru
);
4280 /* If this takes us to the resync_max point where we have to pause,
4281 * then we need to write out the superblock.
4283 sector_nr
+= reshape_sectors
;
4284 if ((sector_nr
- mddev
->curr_resync_completed
) * 2
4285 >= mddev
->resync_max
- mddev
->curr_resync_completed
) {
4286 /* Cannot proceed until we've updated the superblock... */
4287 wait_event(conf
->wait_for_overlap
,
4288 atomic_read(&conf
->reshape_stripes
) == 0);
4289 mddev
->reshape_position
= conf
->reshape_progress
;
4290 mddev
->curr_resync_completed
= sector_nr
;
4291 conf
->reshape_checkpoint
= jiffies
;
4292 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
4293 md_wakeup_thread(mddev
->thread
);
4294 wait_event(mddev
->sb_wait
,
4295 !test_bit(MD_CHANGE_DEVS
, &mddev
->flags
)
4296 || kthread_should_stop());
4297 spin_lock_irq(&conf
->device_lock
);
4298 conf
->reshape_safe
= mddev
->reshape_position
;
4299 spin_unlock_irq(&conf
->device_lock
);
4300 wake_up(&conf
->wait_for_overlap
);
4301 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
4303 return reshape_sectors
;
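
/*
 * Illustrative sketch (not part of the driver): the checkpoint test above
 * reduces to a pure predicate on the three per-device positions.  The helper
 * below restates that predicate with plain integer types so the policy in
 * the comment block ("insist on updating metadata if safepos is behind
 * writepos and readpos is beyond writepos, and in any case every 10
 * seconds") can be read in isolation.  Names and types are assumptions made
 * for the example only.
 */
static int reshape_needs_checkpoint_example(int shrinking,
					    unsigned long long writepos,
					    unsigned long long readpos,
					    unsigned long long safepos,
					    unsigned long now_jiffies,
					    unsigned long last_checkpoint,
					    unsigned long hz)
{
	int unsafe_window = shrinking
		? (safepos > writepos && readpos < writepos)
		: (safepos < writepos && readpos > writepos);

	/* checkpoint when the window is unsafe, or at least every 10s */
	return unsafe_window ||
	       (long)(now_jiffies - (last_checkpoint + 10 * hz)) >= 0;
}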
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
	raid5_conf_t *conf = mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	sector_t sync_blocks;
	int still_degraded = 0;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
		else /* completed sync */
		bitmap_close_sync(mddev->bitmap);

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	return STRIPE_SECTORS;
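
/*
 * Illustrative sketch (assumption-labelled, not driver code): when the
 * bitmap reports a run of already-in-sync blocks, the code above skips it
 * but keeps the skip a whole multiple of STRIPE_SECTORS.  The helper shows
 * that rounding with plain integers; the 8-sector stripe mirrors
 * STRIPE_SECTORS for a 4K page and is only an example value.
 */
static unsigned long long round_skip_to_stripes_example(unsigned long long sync_blocks)
{
	const unsigned long long stripe_sectors = 8;	/* e.g. 4096 >> 9 */

	return (sync_blocks / stripe_sectors) * stripe_sectors;
}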
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	sector_t sector, logical_sector, last_sector;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */

		sh = get_active_stripe(conf, sector, 0, 1, 0);
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;

	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
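
/*
 * Illustrative sketch (not driver code): the retry loop above walks an
 * aligned read one stripe at a time and records how many stripes have
 * already been handled so a later retry can resume where it stopped.  The
 * helper below shows that resume pattern with a plain counter; the names
 * are invented for the example, only the idea (skip the first 'done'
 * units, persist the count when a unit cannot be processed yet) is taken
 * from the code above.
 */
static int process_from_checkpoint_example(int total_units, int *done,
					   int (*try_unit)(int unit))
{
	int unit;

	for (unit = 0; unit < total_units; unit++) {
		if (unit < *done)
			continue;	/* already handled on an earlier pass */
		if (!try_unit(unit)) {
			*done = unit;	/* remember progress, retry later */
			return 0;
		}
	}
	*done = total_units;
	return 1;
}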
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev->private;
	struct blk_plug plug;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	spin_lock_irq(&conf->device_lock);
		if (atomic_read(&mddev->plug_cnt) == 0 &&
		    !list_empty(&conf->bitmap_list)) {
			/* Now is a good time to flush some bitmap updates */
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = conf->seq_flush;
			activate_bit_delay(conf);
		if (atomic_read(&mddev->plug_cnt) == 0)
			raid5_activate_delayed(conf);

		while ((bio = remove_bio_from_retry(conf))) {
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);

		sh = __get_priority_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		spin_lock_irq(&conf->device_lock);
	pr_debug("%d stripes handled\n", handled);
	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	blk_finish_plug(&plug);

	pr_debug("--- raid5d inactive\n");
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
	raid5_conf_t *conf = mddev->private;
	return sprintf(page, "%d\n", conf->max_nr_stripes);

raid5_set_cache_size(mddev_t *mddev, int size)
	raid5_conf_t *conf = mddev->private;

	if (size <= 16 || size > 32768)
	while (size < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
	err = md_allow_write(mddev);
	while (size > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
EXPORT_SYMBOL(raid5_set_cache_size);

raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
	raid5_conf_t *conf = mddev->private;

	if (len >= PAGE_SIZE)
	if (strict_strtoul(page, 10, &new))
	err = raid5_set_cache_size(mddev, new);

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);

raid5_show_preread_threshold(mddev_t *mddev, char *page)
	raid5_conf_t *conf = mddev->private;
	return sprintf(page, "%d\n", conf->bypass_threshold);

raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
	raid5_conf_t *conf = mddev->private;
	if (len >= PAGE_SIZE)
	if (strict_strtoul(page, 10, &new))
	if (new > conf->max_nr_stripes)
	conf->bypass_threshold = new;

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);

stripe_cache_active_show(mddev_t *mddev, char *page)
	raid5_conf_t *conf = mddev->private;
	return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
static struct attribute_group raid5_attrs_group = {
	.attrs = raid5_attrs,
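
/*
 * Illustrative sketch (not driver code): raid5_set_cache_size() above only
 * accepts sizes in (16, 32768] and then shrinks or grows the cache one
 * stripe_head at a time until max_nr_stripes matches the request.  The
 * helper restates that convergence with plain integers and invented
 * callbacks; only the bounds and the shrink-then-grow pattern are taken
 * from the code above.
 */
static int set_cache_size_example(int requested, int *nr_stripes,
				  int (*grow_one)(void), int (*shrink_one)(void))
{
	if (requested <= 16 || requested > 32768)
		return -1;				/* mirrors the range check above */
	while (*nr_stripes > requested && shrink_one())
		(*nr_stripes)--;			/* release stripes one at a time */
	while (*nr_stripes < requested && grow_one())
		(*nr_stripes)++;			/* allocate stripes one at a time */
	return 0;
}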
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
	raid5_conf_t *conf = mddev->private;

		sectors = mddev->dev_sectors;
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
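
/*
 * Illustrative example (assumed numbers, not driver code): for a 6-drive
 * array with max_degraded == 1 (RAID-5), 512-sector (256K) chunks and
 * 1,000,000 usable sectors per device, the computation above rounds each
 * device down to a whole number of chunks and multiplies by the data disks:
 *
 *	sectors  = 1000000 & ~511   = 999936
 *	capacity = 999936 * (6 - 1) = 4999680 sectors (~2.4 GiB)
 *
 * The helper just repeats that arithmetic with plain types.
 */
static unsigned long long raid5_capacity_example(unsigned long long dev_sectors,
						 unsigned int chunk_sectors,
						 int raid_disks, int max_degraded)
{
	dev_sectors &= ~((unsigned long long)chunk_sectors - 1);
	return dev_sectors * (raid_disks - max_degraded);
}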
static void raid5_free_percpu(raid5_conf_t *conf)
	struct raid5_percpu *percpu;

	for_each_possible_cpu(cpu) {
		percpu = per_cpu_ptr(conf->percpu, cpu);
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
	free_percpu(conf->percpu);

static void free_conf(raid5_conf_t *conf)
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->stripe_hashtbl);
#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
	raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
	long cpu = (long)hcpu;
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (conf->level == 6 && !percpu->spare_page)
			percpu->spare_page = alloc_page(GFP_KERNEL);
		if (!percpu->scribble)
			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

		if (!percpu->scribble ||
		    (conf->level == 6 && !percpu->spare_page)) {
			safe_put_page(percpu->spare_page);
			kfree(percpu->scribble);
			pr_err("%s: failed memory allocation for cpu%ld\n",
			return notifier_from_errno(-ENOMEM);
	case CPU_DEAD_FROZEN:
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
		percpu->spare_page = NULL;
		percpu->scribble = NULL;

static int raid5_alloc_percpu(raid5_conf_t *conf)
	struct page *spare_page;
	struct raid5_percpu __percpu *allcpus;

	allcpus = alloc_percpu(struct raid5_percpu);
	conf->percpu = allcpus;

	for_each_present_cpu(cpu) {
		if (conf->level == 6) {
			spare_page = alloc_page(GFP_KERNEL);
			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	err = register_cpu_notifier(&conf->cpu_notify);
static raid5_conf_t *setup_conf(mddev_t *mddev)
	int raid_disk, memory, max_disks;
	struct disk_info *disk;

	if (mddev->new_level != 5
	    && mddev->new_level != 4
	    && mddev->new_level != 6) {
		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->new_level);
		return ERR_PTR(-EIO);
	if ((mddev->new_level == 5
	     && !algorithm_valid_raid5(mddev->new_layout)) ||
	    (mddev->new_level == 6
	     && !algorithm_valid_raid6(mddev->new_layout))) {
		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
		       mdname(mddev), mddev->new_layout);
		return ERR_PTR(-EIO);
	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
		       mdname(mddev), mddev->raid_disks);
		return ERR_PTR(-EINVAL);

	if (!mddev->new_chunk_sectors ||
	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
		       mdname(mddev), mddev->new_chunk_sectors << 9);
		return ERR_PTR(-EINVAL);

	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;

	conf->raid_disks = mddev->raid_disks;
	if (mddev->reshape_position == MaxSector)
		conf->previous_raid_disks = mddev->raid_disks;
	else
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
	conf->scribble_len = scribble_len(max_disks);

	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)

	conf->level = mddev->new_level;
	if (raid5_alloc_percpu(conf) != 0)

	pr_debug("raid456: run(%s) called.\n", mdname(mddev));

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= max_disks
		disk = conf->disks + raid_disk;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
		} else if (rdev->saved_raid_disk != raid_disk)
			/* Cannot rely on bitmap to complete recovery */

	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->level = mddev->new_level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->new_layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->reshape_progress = mddev->reshape_position;
	if (conf->reshape_progress != MaxSector) {
		conf->prev_chunk_sectors = mddev->chunk_sectors;
		conf->prev_algo = mddev->layout;

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
		       mdname(mddev), memory);
		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
		       mdname(mddev), memory);

	conf->thread = md_register_thread(raid5d, mddev, NULL);
	if (!conf->thread) {
		       "md/raid:%s: couldn't allocate thread.\n",

	return ERR_PTR(-EIO);
	return ERR_PTR(-ENOMEM);
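
/*
 * Illustrative example (assumed sizes, not driver code): the "allocated
 * %dkB" figure printed above is
 * max_nr_stripes * (sizeof(struct stripe_head) + max_disks *
 * (sizeof(struct bio) + PAGE_SIZE)) / 1024.  With the default 256 stripes,
 * 4 disks, 4K pages and, say, a 2K stripe_head and 256-byte bio, that is
 * roughly 256 * (2048 + 4 * (256 + 4096)) / 1024 = 4864 kB.  The struct
 * sizes are invented for the example; only the formula comes from the
 * code above.
 */
static unsigned long stripe_cache_kb_example(int nr_stripes, int max_disks,
					     unsigned long sh_size,
					     unsigned long bio_size,
					     unsigned long page_size)
{
	return (unsigned long)nr_stripes *
	       (sh_size + max_disks * (bio_size + page_size)) / 1024;
}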
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
static int run(mddev_t *mddev)
	int working_disks = 0;
	int dirty_parity_disks = 0;
	sector_t reshape_offset = 0;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid:%s: not clean"
		       " -- starting background reconstruction\n",
	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Currently only disks can change, it must
		 * increase, and we must be past the point where
		 * a stripe over-writes itself
		 */
		sector_t here_new, here_old;
		int max_degraded = (mddev->level == 6 ? 2 : 1);

		if (mddev->new_level != mddev->level) {
			printk(KERN_ERR "md/raid:%s: unsupported reshape "
			       "required - aborting.\n",
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 */
		here_new = mddev->reshape_position;
		if (sector_div(here_new, mddev->new_chunk_sectors *
			       (mddev->raid_disks - max_degraded))) {
			printk(KERN_ERR "md/raid:%s: reshape_position not "
			       "on a stripe boundary\n", mdname(mddev));
		reshape_offset = here_new * mddev->new_chunk_sectors;
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, mddev->chunk_sectors *
			   (old_disks-max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
		if (mddev->delta_disks == 0) {
			/* We cannot be sure it is safe to start an in-place
			 * reshape.  It is only safe if user-space is monitoring
			 * and taking constant backups.
			 * mdadm always starts a situation like this in
			 * readonly mode so it can take control before
			 * allowing any writes.  So just check for that.
			 */
			if ((here_new * mddev->new_chunk_sectors !=
			     here_old * mddev->chunk_sectors) ||
				printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
				       " in read-only mode - aborting\n",
		} else if (mddev->delta_disks < 0
			   ? (here_new * mddev->new_chunk_sectors <=
			      here_old * mddev->chunk_sectors)
			   : (here_new * mddev->new_chunk_sectors >=
			      here_old * mddev->chunk_sectors)) {
			/* Reading from the same stripe as writing to - bad */
			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
			       "auto-recovery - aborting.\n",
		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
		/* OK, we should be able to continue; */
		BUG_ON(mddev->level != mddev->new_level);
		BUG_ON(mddev->layout != mddev->new_layout);
		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
		BUG_ON(mddev->delta_disks != 0);

	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

		return PTR_ERR(conf);

	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk < 0)
		if (test_bit(In_sync, &rdev->flags)) {
		/* This disc is not fully in-sync.  However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
		/* Hack because v0.91 doesn't store recovery_offset properly. */
		if (mddev->major_version == 0 &&
		    mddev->minor_version > 90)
			rdev->recovery_offset = reshape_offset;

		if (rdev->recovery_offset < reshape_offset) {
			/* We need to check old and new layout */
			if (!only_parity(rdev->raid_disk,
					 conf->max_degraded))
		if (!only_parity(rdev->raid_disk,
				 conf->previous_raid_disks,
				 conf->max_degraded))
		dirty_parity_disks++;

	mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)

	if (has_failed(conf)) {
		printk(KERN_ERR "md/raid:%s: not enough operational devices"
		       " (%d/%d failed)\n",
		       mdname(mddev), mddev->degraded, conf->raid_disks);

	/* device size must be a multiple of chunk size */
	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
	mddev->resync_max_sectors = mddev->dev_sectors;

	if (mddev->degraded > dirty_parity_disks &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			       "md/raid:%s: starting dirty degraded array"
			       " - data corruption possible.\n",
			       "md/raid:%s: cannot start dirty degraded array.\n",

	if (mddev->degraded == 0)
		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
		       " devices, algorithm %d\n", mdname(mddev), conf->level,
		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
		       " out of %d devices, algorithm %d\n",
		       mdname(mddev), conf->level,
		       mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, mddev->new_layout);

	print_raid5_conf(conf);

	if (conf->reshape_progress != MaxSector) {
		conf->reshape_safe = conf->reshape_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,

	/* Ok, everything is just fine now */
	if (mddev->to_remove == &raid5_attrs_group)
		mddev->to_remove = NULL;
	else if (mddev->kobj.sd &&
		 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		       "raid5: failed to create sysfs attributes for %s\n",
	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (number of data disks) * chunk size.
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;

		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);

		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = raid5_congested;

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));

		list_for_each_entry(rdev, &mddev->disks, same_set)
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

	md_unregister_thread(&mddev->thread);
	print_raid5_conf(conf);
	mddev->private = NULL;
	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
static int stop(mddev_t *mddev)
	raid5_conf_t *conf = mddev->private;

	md_unregister_thread(&mddev->thread);
	mddev->queue->backing_dev_info.congested_fn = NULL;
	mddev->private = NULL;
	mddev->to_remove = &raid5_attrs_group;
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
	seq_printf(seq, "sh %llu, count %d.\n",
		   (unsigned long long)sh->sector, atomic_read(&sh->count));
	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->disks; i++) {
		seq_printf(seq, "(cache%d: %p %ld) ",
			   i, sh->dev[i].page, sh->dev[i].flags);
	seq_printf(seq, "\n");
static void printall(struct seq_file *seq, raid5_conf_t *conf)
	struct stripe_head *sh;
	struct hlist_node *hn;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
	spin_unlock_irq(&conf->device_lock);
static void status(struct seq_file *seq, mddev_t *mddev)
	raid5_conf_t *conf = mddev->private;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_sectors / 2, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
	seq_printf(seq, "\n");
	printall(seq, conf);
static void print_raid5_conf (raid5_conf_t *conf)
	struct disk_info *tmp;

	printk(KERN_DEBUG "RAID conf printout:\n");
		printk("(conf==NULL)\n");
	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
static int raid5_spare_active(mddev_t *mddev)
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;
	unsigned long flags;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		    && tmp->rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);
	print_raid5_conf(conf);
static int raid5_remove_disk(mddev_t *mddev, int number)
	raid5_conf_t *conf = mddev->private;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
		if (number >= conf->raid_disks &&
		    conf->reshape_progress == MaxSector)
			clear_bit(In_sync, &rdev->flags);

		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    !has_failed(conf) &&
		    number < conf->raid_disks) {
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
	print_raid5_conf(conf);
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
	raid5_conf_t *conf = mddev->private;
	struct disk_info *p;
	int last = conf->raid_disks - 1;

	if (has_failed(conf))
		/* no point adding a device */

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	for ( ; disk <= last; disk++)
		if ((p=conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			if (rdev->saved_raid_disk != disk)
			rcu_assign_pointer(p->rdev, rdev);
	print_raid5_conf(conf);
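
/*
 * Illustrative sketch (not driver code): raid5_add_disk() above prefers the
 * slot the device previously occupied (saved_raid_disk) and only then falls
 * back to scanning for the first free slot in [first, last].  The helper
 * restates that preference with a plain array; names are invented for the
 * example.
 */
static int pick_raid_slot_example(const int *slot_used, int nr_slots,
				  int first, int last, int saved)
{
	int slot;

	if (saved >= first && saved <= last && saved < nr_slots &&
	    !slot_used[saved])
		return saved;			/* reuse the old position */
	for (slot = first; slot <= last && slot < nr_slots; slot++)
		if (!slot_used[slot])
			return slot;		/* first free slot */
	return -1;				/* no slot available */
}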
static int raid5_resize(mddev_t *mddev, sector_t sectors)
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	md_set_array_sectors(mddev, raid5_size(mddev, sectors,
					       mddev->raid_disks));
	if (mddev->array_sectors >
	    raid5_size(mddev, sectors, mddev->raid_disks))
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
static int check_stripe_cache(mddev_t *mddev)
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	raid5_conf_t *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
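
/*
 * Illustrative sketch (not driver code): the check above wants the stripe
 * cache to hold at least 4 chunks worth of 4K stripe_heads before a reshape
 * is allowed.  For example, with 512K chunks that is (524288 / 4096) * 4 =
 * 512 stripe_heads, so the default cache of 256 would be rejected until
 * stripe_cache_size is raised.  The helper repeats that arithmetic with
 * plain integers and an assumed 4K stripe.
 */
static int stripes_needed_for_reshape_example(unsigned int chunk_bytes)
{
	const unsigned int stripe_size = 4096;	/* example STRIPE_SIZE */

	return (chunk_bytes / stripe_size) * 4;
}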
static int check_reshape(mddev_t *mddev)
	raid5_conf_t *conf = mddev->private;

	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	/* Cannot grow a bitmap yet */
	if (has_failed(conf))
	if (mddev->delta_disks < 0) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum
		 */
		if (mddev->level == 6)
		if (mddev->raid_disks + mddev->delta_disks < min)

	if (!check_stripe_cache(mddev))

	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
static int raid5_start_reshape(mddev_t *mddev)
	raid5_conf_t *conf = mddev->private;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))

	if (!check_stripe_cache(mddev))

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 */

	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md/raid:%s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	if (mddev->delta_disks < 0)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "partially reconstructed" state of
	 * such devices during the reshape and confusion could result.
	 */
	if (mddev->delta_disks >= 0) {
		int added_devices = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					    >= conf->previous_raid_disks) {
						set_bit(In_sync, &rdev->flags);
						rdev->recovery_offset = 0;
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
						/* Failure here is OK */;
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);

		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post number of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
		spin_unlock_irqrestore(&conf->device_lock, flags);

	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(raid5_conf_t *conf)
	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (number of data disks) * chunk size.
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
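
/*
 * Illustrative example (assumed geometry, not driver code): with 4 data
 * disks, 512-sector (256K) chunks and 4K pages, the read-ahead target
 * computed above is
 *
 *	stripe   = 4 * ((512 << 9) / 4096) = 4 * 64 = 256 pages
 *	ra_pages = 2 * stripe              = 512 pages (2 MiB)
 *
 * i.e. two full stripes of read-ahead.  The helper restates the formula
 * with plain types.
 */
static unsigned long readahead_pages_example(int data_disks,
					     unsigned int chunk_sectors,
					     unsigned long page_size)
{
	unsigned long stripe = data_disks * ((chunk_sectors << 9) / page_size);

	return 2 * stripe;
}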
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(mddev_t *mddev)
	raid5_conf_t *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
			mddev->degraded = conf->raid_disks;
			for (d = 0; d < conf->raid_disks; d++)
				if (conf->disks[d].rdev &&
					     &conf->disks[d].rdev->flags))
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
				mdk_rdev_t *rdev = conf->disks[d].rdev;
				if (rdev && raid5_remove_disk(mddev, d) == 0) {
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
static void raid5_quiesce(mddev_t *mddev, int state)
	raid5_conf_t *conf = mddev->private;

	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
static void *raid45_takeover_raid0(mddev_t *mddev, int level)
	struct raid0_private_data *raid0_priv = mddev->private;

	/* for raid0 takeover only one zone is supported */
	if (raid0_priv->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		return ERR_PTR(-EINVAL);

	sectors = raid0_priv->strip_zone[0].zone_end;
	sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
static void *raid5_takeover_raid1(mddev_t *mddev)
	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
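
/*
 * Illustrative sketch (not driver code): the loop above starts from a 64K
 * chunk (128 sectors) and halves it until the chunk size divides the raid1
 * array size exactly; if it ends up smaller than one stripe the takeover is
 * refused.  The helper restates that selection with plain integers; the 4K
 * minimum mirrors STRIPE_SIZE for a 4K page and is an assumption of the
 * example.
 */
static int raid1_takeover_chunk_sectors_example(unsigned long long array_sectors)
{
	unsigned int chunksect = 64 * 2;	/* 64K expressed in 512B sectors */

	while (chunksect && (array_sectors & (chunksect - 1)))
		chunksect >>= 1;		/* halve until it divides evenly */

	if ((chunksect << 9) < 4096)		/* smaller than one 4K stripe */
		return -1;
	return chunksect;
}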
static void *raid5_takeover_raid6(mddev_t *mddev)
	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		return ERR_PTR(-EINVAL);
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
static int raid5_check_reshape(mddev_t *mddev)
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	raid5_conf_t *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
		if (new_chunk < (PAGE_SIZE>>9))
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	return check_reshape(mddev);
static int raid6_check_reshape(mddev_t *mddev)
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
		if (new_chunk < (PAGE_SIZE >> 9))
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */

	/* They look valid */
	return check_reshape(mddev);
static void *raid5_takeover(mddev_t *mddev)
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
static void *raid4_takeover(mddev_t *mddev)
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	return ERR_PTR(-EINVAL);
static struct mdk_personality raid5_personality;

static void *raid6_takeover(mddev_t *mddev)
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		return ERR_PTR(-EINVAL);
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
static struct mdk_personality raid6_personality =
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
static struct mdk_personality raid5_personality =
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
static struct mdk_personality raid4_personality =
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
static int __init raid5_init(void)
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);

static void raid5_exit(void)
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");