2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->bm_write is the number of the last batch successfully written.
31 * conf->bm_flush is the number of the last batch that was closed to
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is bm_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
38 * When an unplug happens, we increment bm_flush, thus closing the current
40 * When we notice that bm_flush > bm_write, we write out all pending updates
41 * to the bitmap, and advance bm_write to where bm_flush was.
42 * This may occasionally write a bit out twice, but is sure never to
46 #include <linux/blkdev.h>
47 #include <linux/kthread.h>
48 #include <linux/raid/pq.h>
49 #include <linux/async_tx.h>
50 #include <linux/async.h>
51 #include <linux/seq_file.h>
52 #include <linux/cpu.h>
61 #define NR_STRIPES 256
62 #define STRIPE_SIZE PAGE_SIZE
63 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
64 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
65 #define IO_THRESHOLD 1
66 #define BYPASS_THRESHOLD 1
67 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
68 #define HASH_MASK (NR_HASH - 1)
70 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
72 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
73 * order without overlap. There may be several bio's per stripe+device, and
74 * a bio could span several devices.
75 * When walking this list for a particular stripe+device, we must never proceed
76 * beyond a bio that extends past this device, as the next bio might no longer
78 * This macro is used to determine the 'next' bio in the list, given the sector
79 * of the current stripe+device
81 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
83 * The following can be used to debug the driver
85 #define RAID5_PARANOIA 1
86 #if RAID5_PARANOIA && defined(CONFIG_SMP)
87 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
89 # define CHECK_DEVLOCK()
97 #define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
100 * We maintain a biased count of active stripes in the bottom 16 bits of
101 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
103 static inline int raid5_bi_phys_segments(struct bio
*bio
)
105 return bio
->bi_phys_segments
& 0xffff;
108 static inline int raid5_bi_hw_segments(struct bio
*bio
)
110 return (bio
->bi_phys_segments
>> 16) & 0xffff;
113 static inline int raid5_dec_bi_phys_segments(struct bio
*bio
)
115 --bio
->bi_phys_segments
;
116 return raid5_bi_phys_segments(bio
);
119 static inline int raid5_dec_bi_hw_segments(struct bio
*bio
)
121 unsigned short val
= raid5_bi_hw_segments(bio
);
124 bio
->bi_phys_segments
= (val
<< 16) | raid5_bi_phys_segments(bio
);
128 static inline void raid5_set_bi_hw_segments(struct bio
*bio
, unsigned int cnt
)
130 bio
->bi_phys_segments
= raid5_bi_phys_segments(bio
) || (cnt
<< 16);
133 /* Find first data disk in a raid6 stripe */
134 static inline int raid6_d0(struct stripe_head
*sh
)
137 /* ddf always start from first device */
139 /* md starts just after Q block */
140 if (sh
->qd_idx
== sh
->disks
- 1)
143 return sh
->qd_idx
+ 1;
145 static inline int raid6_next_disk(int disk
, int raid_disks
)
148 return (disk
< raid_disks
) ? disk
: 0;
151 /* When walking through the disks in a raid5, starting at raid6_d0,
152 * We need to map each disk to a 'slot', where the data disks are slot
153 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
154 * is raid_disks-1. This help does that mapping.
156 static int raid6_idx_to_slot(int idx
, struct stripe_head
*sh
,
157 int *count
, int syndrome_disks
)
163 if (idx
== sh
->pd_idx
)
164 return syndrome_disks
;
165 if (idx
== sh
->qd_idx
)
166 return syndrome_disks
+ 1;
172 static void return_io(struct bio
*return_bi
)
174 struct bio
*bi
= return_bi
;
177 return_bi
= bi
->bi_next
;
185 static void print_raid5_conf (raid5_conf_t
*conf
);
187 static int stripe_operations_active(struct stripe_head
*sh
)
189 return sh
->check_state
|| sh
->reconstruct_state
||
190 test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
) ||
191 test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
194 static void __release_stripe(raid5_conf_t
*conf
, struct stripe_head
*sh
)
196 if (atomic_dec_and_test(&sh
->count
)) {
197 BUG_ON(!list_empty(&sh
->lru
));
198 BUG_ON(atomic_read(&conf
->active_stripes
)==0);
199 if (test_bit(STRIPE_HANDLE
, &sh
->state
)) {
200 if (test_bit(STRIPE_DELAYED
, &sh
->state
)) {
201 list_add_tail(&sh
->lru
, &conf
->delayed_list
);
202 blk_plug_device(conf
->mddev
->queue
);
203 } else if (test_bit(STRIPE_BIT_DELAY
, &sh
->state
) &&
204 sh
->bm_seq
- conf
->seq_write
> 0) {
205 list_add_tail(&sh
->lru
, &conf
->bitmap_list
);
206 blk_plug_device(conf
->mddev
->queue
);
208 clear_bit(STRIPE_BIT_DELAY
, &sh
->state
);
209 list_add_tail(&sh
->lru
, &conf
->handle_list
);
211 md_wakeup_thread(conf
->mddev
->thread
);
213 BUG_ON(stripe_operations_active(sh
));
214 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
215 atomic_dec(&conf
->preread_active_stripes
);
216 if (atomic_read(&conf
->preread_active_stripes
) < IO_THRESHOLD
)
217 md_wakeup_thread(conf
->mddev
->thread
);
219 atomic_dec(&conf
->active_stripes
);
220 if (!test_bit(STRIPE_EXPANDING
, &sh
->state
)) {
221 list_add_tail(&sh
->lru
, &conf
->inactive_list
);
222 wake_up(&conf
->wait_for_stripe
);
223 if (conf
->retry_read_aligned
)
224 md_wakeup_thread(conf
->mddev
->thread
);
230 static void release_stripe(struct stripe_head
*sh
)
232 raid5_conf_t
*conf
= sh
->raid_conf
;
235 spin_lock_irqsave(&conf
->device_lock
, flags
);
236 __release_stripe(conf
, sh
);
237 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
240 static inline void remove_hash(struct stripe_head
*sh
)
242 pr_debug("remove_hash(), stripe %llu\n",
243 (unsigned long long)sh
->sector
);
245 hlist_del_init(&sh
->hash
);
248 static inline void insert_hash(raid5_conf_t
*conf
, struct stripe_head
*sh
)
250 struct hlist_head
*hp
= stripe_hash(conf
, sh
->sector
);
252 pr_debug("insert_hash(), stripe %llu\n",
253 (unsigned long long)sh
->sector
);
256 hlist_add_head(&sh
->hash
, hp
);
260 /* find an idle stripe, make sure it is unhashed, and return it. */
261 static struct stripe_head
*get_free_stripe(raid5_conf_t
*conf
)
263 struct stripe_head
*sh
= NULL
;
264 struct list_head
*first
;
267 if (list_empty(&conf
->inactive_list
))
269 first
= conf
->inactive_list
.next
;
270 sh
= list_entry(first
, struct stripe_head
, lru
);
271 list_del_init(first
);
273 atomic_inc(&conf
->active_stripes
);
278 static void shrink_buffers(struct stripe_head
*sh
, int num
)
283 for (i
=0; i
<num
; i
++) {
287 sh
->dev
[i
].page
= NULL
;
292 static int grow_buffers(struct stripe_head
*sh
, int num
)
296 for (i
=0; i
<num
; i
++) {
299 if (!(page
= alloc_page(GFP_KERNEL
))) {
302 sh
->dev
[i
].page
= page
;
307 static void raid5_build_block(struct stripe_head
*sh
, int i
, int previous
);
308 static void stripe_set_idx(sector_t stripe
, raid5_conf_t
*conf
, int previous
,
309 struct stripe_head
*sh
);
311 static void init_stripe(struct stripe_head
*sh
, sector_t sector
, int previous
)
313 raid5_conf_t
*conf
= sh
->raid_conf
;
316 BUG_ON(atomic_read(&sh
->count
) != 0);
317 BUG_ON(test_bit(STRIPE_HANDLE
, &sh
->state
));
318 BUG_ON(stripe_operations_active(sh
));
321 pr_debug("init_stripe called, stripe %llu\n",
322 (unsigned long long)sh
->sector
);
326 sh
->generation
= conf
->generation
- previous
;
327 sh
->disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
329 stripe_set_idx(sector
, conf
, previous
, sh
);
333 for (i
= sh
->disks
; i
--; ) {
334 struct r5dev
*dev
= &sh
->dev
[i
];
336 if (dev
->toread
|| dev
->read
|| dev
->towrite
|| dev
->written
||
337 test_bit(R5_LOCKED
, &dev
->flags
)) {
338 printk(KERN_ERR
"sector=%llx i=%d %p %p %p %p %d\n",
339 (unsigned long long)sh
->sector
, i
, dev
->toread
,
340 dev
->read
, dev
->towrite
, dev
->written
,
341 test_bit(R5_LOCKED
, &dev
->flags
));
345 raid5_build_block(sh
, i
, previous
);
347 insert_hash(conf
, sh
);
350 static struct stripe_head
*__find_stripe(raid5_conf_t
*conf
, sector_t sector
,
353 struct stripe_head
*sh
;
354 struct hlist_node
*hn
;
357 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector
);
358 hlist_for_each_entry(sh
, hn
, stripe_hash(conf
, sector
), hash
)
359 if (sh
->sector
== sector
&& sh
->generation
== generation
)
361 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector
);
365 static void unplug_slaves(mddev_t
*mddev
);
366 static void raid5_unplug_device(struct request_queue
*q
);
368 static struct stripe_head
*
369 get_active_stripe(raid5_conf_t
*conf
, sector_t sector
,
370 int previous
, int noblock
, int noquiesce
)
372 struct stripe_head
*sh
;
374 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector
);
376 spin_lock_irq(&conf
->device_lock
);
379 wait_event_lock_irq(conf
->wait_for_stripe
,
380 conf
->quiesce
== 0 || noquiesce
,
381 conf
->device_lock
, /* nothing */);
382 sh
= __find_stripe(conf
, sector
, conf
->generation
- previous
);
384 if (!conf
->inactive_blocked
)
385 sh
= get_free_stripe(conf
);
386 if (noblock
&& sh
== NULL
)
389 conf
->inactive_blocked
= 1;
390 wait_event_lock_irq(conf
->wait_for_stripe
,
391 !list_empty(&conf
->inactive_list
) &&
392 (atomic_read(&conf
->active_stripes
)
393 < (conf
->max_nr_stripes
*3/4)
394 || !conf
->inactive_blocked
),
396 raid5_unplug_device(conf
->mddev
->queue
)
398 conf
->inactive_blocked
= 0;
400 init_stripe(sh
, sector
, previous
);
402 if (atomic_read(&sh
->count
)) {
403 BUG_ON(!list_empty(&sh
->lru
)
404 && !test_bit(STRIPE_EXPANDING
, &sh
->state
));
406 if (!test_bit(STRIPE_HANDLE
, &sh
->state
))
407 atomic_inc(&conf
->active_stripes
);
408 if (list_empty(&sh
->lru
) &&
409 !test_bit(STRIPE_EXPANDING
, &sh
->state
))
411 list_del_init(&sh
->lru
);
414 } while (sh
== NULL
);
417 atomic_inc(&sh
->count
);
419 spin_unlock_irq(&conf
->device_lock
);
424 raid5_end_read_request(struct bio
*bi
, int error
);
426 raid5_end_write_request(struct bio
*bi
, int error
);
428 static void ops_run_io(struct stripe_head
*sh
, struct stripe_head_state
*s
)
430 raid5_conf_t
*conf
= sh
->raid_conf
;
431 int i
, disks
= sh
->disks
;
435 for (i
= disks
; i
--; ) {
439 if (test_and_clear_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
))
441 else if (test_and_clear_bit(R5_Wantread
, &sh
->dev
[i
].flags
))
446 bi
= &sh
->dev
[i
].req
;
450 bi
->bi_end_io
= raid5_end_write_request
;
452 bi
->bi_end_io
= raid5_end_read_request
;
455 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
456 if (rdev
&& test_bit(Faulty
, &rdev
->flags
))
459 atomic_inc(&rdev
->nr_pending
);
463 if (s
->syncing
|| s
->expanding
|| s
->expanded
)
464 md_sync_acct(rdev
->bdev
, STRIPE_SECTORS
);
466 set_bit(STRIPE_IO_STARTED
, &sh
->state
);
468 bi
->bi_bdev
= rdev
->bdev
;
469 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
470 __func__
, (unsigned long long)sh
->sector
,
472 atomic_inc(&sh
->count
);
473 bi
->bi_sector
= sh
->sector
+ rdev
->data_offset
;
474 bi
->bi_flags
= 1 << BIO_UPTODATE
;
478 bi
->bi_io_vec
= &sh
->dev
[i
].vec
;
479 bi
->bi_io_vec
[0].bv_len
= STRIPE_SIZE
;
480 bi
->bi_io_vec
[0].bv_offset
= 0;
481 bi
->bi_size
= STRIPE_SIZE
;
484 test_bit(R5_ReWrite
, &sh
->dev
[i
].flags
))
485 atomic_add(STRIPE_SECTORS
,
486 &rdev
->corrected_errors
);
487 generic_make_request(bi
);
490 set_bit(STRIPE_DEGRADED
, &sh
->state
);
491 pr_debug("skip op %ld on disc %d for sector %llu\n",
492 bi
->bi_rw
, i
, (unsigned long long)sh
->sector
);
493 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
494 set_bit(STRIPE_HANDLE
, &sh
->state
);
499 static struct dma_async_tx_descriptor
*
500 async_copy_data(int frombio
, struct bio
*bio
, struct page
*page
,
501 sector_t sector
, struct dma_async_tx_descriptor
*tx
)
504 struct page
*bio_page
;
507 struct async_submit_ctl submit
;
508 enum async_tx_flags flags
= 0;
510 if (bio
->bi_sector
>= sector
)
511 page_offset
= (signed)(bio
->bi_sector
- sector
) * 512;
513 page_offset
= (signed)(sector
- bio
->bi_sector
) * -512;
516 flags
|= ASYNC_TX_FENCE
;
517 init_async_submit(&submit
, flags
, tx
, NULL
, NULL
, NULL
);
519 bio_for_each_segment(bvl
, bio
, i
) {
520 int len
= bio_iovec_idx(bio
, i
)->bv_len
;
524 if (page_offset
< 0) {
525 b_offset
= -page_offset
;
526 page_offset
+= b_offset
;
530 if (len
> 0 && page_offset
+ len
> STRIPE_SIZE
)
531 clen
= STRIPE_SIZE
- page_offset
;
536 b_offset
+= bio_iovec_idx(bio
, i
)->bv_offset
;
537 bio_page
= bio_iovec_idx(bio
, i
)->bv_page
;
539 tx
= async_memcpy(page
, bio_page
, page_offset
,
540 b_offset
, clen
, &submit
);
542 tx
= async_memcpy(bio_page
, page
, b_offset
,
543 page_offset
, clen
, &submit
);
545 /* chain the operations */
546 submit
.depend_tx
= tx
;
548 if (clen
< len
) /* hit end of page */
556 static void ops_complete_biofill(void *stripe_head_ref
)
558 struct stripe_head
*sh
= stripe_head_ref
;
559 struct bio
*return_bi
= NULL
;
560 raid5_conf_t
*conf
= sh
->raid_conf
;
563 pr_debug("%s: stripe %llu\n", __func__
,
564 (unsigned long long)sh
->sector
);
566 /* clear completed biofills */
567 spin_lock_irq(&conf
->device_lock
);
568 for (i
= sh
->disks
; i
--; ) {
569 struct r5dev
*dev
= &sh
->dev
[i
];
571 /* acknowledge completion of a biofill operation */
572 /* and check if we need to reply to a read request,
573 * new R5_Wantfill requests are held off until
574 * !STRIPE_BIOFILL_RUN
576 if (test_and_clear_bit(R5_Wantfill
, &dev
->flags
)) {
577 struct bio
*rbi
, *rbi2
;
582 while (rbi
&& rbi
->bi_sector
<
583 dev
->sector
+ STRIPE_SECTORS
) {
584 rbi2
= r5_next_bio(rbi
, dev
->sector
);
585 if (!raid5_dec_bi_phys_segments(rbi
)) {
586 rbi
->bi_next
= return_bi
;
593 spin_unlock_irq(&conf
->device_lock
);
594 clear_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
596 return_io(return_bi
);
598 set_bit(STRIPE_HANDLE
, &sh
->state
);
602 static void ops_run_biofill(struct stripe_head
*sh
)
604 struct dma_async_tx_descriptor
*tx
= NULL
;
605 raid5_conf_t
*conf
= sh
->raid_conf
;
606 struct async_submit_ctl submit
;
609 pr_debug("%s: stripe %llu\n", __func__
,
610 (unsigned long long)sh
->sector
);
612 for (i
= sh
->disks
; i
--; ) {
613 struct r5dev
*dev
= &sh
->dev
[i
];
614 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
616 spin_lock_irq(&conf
->device_lock
);
617 dev
->read
= rbi
= dev
->toread
;
619 spin_unlock_irq(&conf
->device_lock
);
620 while (rbi
&& rbi
->bi_sector
<
621 dev
->sector
+ STRIPE_SECTORS
) {
622 tx
= async_copy_data(0, rbi
, dev
->page
,
624 rbi
= r5_next_bio(rbi
, dev
->sector
);
629 atomic_inc(&sh
->count
);
630 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_biofill
, sh
, NULL
);
631 async_trigger_callback(&submit
);
634 static void mark_target_uptodate(struct stripe_head
*sh
, int target
)
641 tgt
= &sh
->dev
[target
];
642 set_bit(R5_UPTODATE
, &tgt
->flags
);
643 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
644 clear_bit(R5_Wantcompute
, &tgt
->flags
);
647 static void ops_complete_compute(void *stripe_head_ref
)
649 struct stripe_head
*sh
= stripe_head_ref
;
651 pr_debug("%s: stripe %llu\n", __func__
,
652 (unsigned long long)sh
->sector
);
654 /* mark the computed target(s) as uptodate */
655 mark_target_uptodate(sh
, sh
->ops
.target
);
656 mark_target_uptodate(sh
, sh
->ops
.target2
);
658 clear_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
659 if (sh
->check_state
== check_state_compute_run
)
660 sh
->check_state
= check_state_compute_result
;
661 set_bit(STRIPE_HANDLE
, &sh
->state
);
665 /* return a pointer to the address conversion region of the scribble buffer */
666 static addr_conv_t
*to_addr_conv(struct stripe_head
*sh
,
667 struct raid5_percpu
*percpu
)
669 return percpu
->scribble
+ sizeof(struct page
*) * (sh
->disks
+ 2);
672 static struct dma_async_tx_descriptor
*
673 ops_run_compute5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
675 int disks
= sh
->disks
;
676 struct page
**xor_srcs
= percpu
->scribble
;
677 int target
= sh
->ops
.target
;
678 struct r5dev
*tgt
= &sh
->dev
[target
];
679 struct page
*xor_dest
= tgt
->page
;
681 struct dma_async_tx_descriptor
*tx
;
682 struct async_submit_ctl submit
;
685 pr_debug("%s: stripe %llu block: %d\n",
686 __func__
, (unsigned long long)sh
->sector
, target
);
687 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
689 for (i
= disks
; i
--; )
691 xor_srcs
[count
++] = sh
->dev
[i
].page
;
693 atomic_inc(&sh
->count
);
695 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
, NULL
,
696 ops_complete_compute
, sh
, to_addr_conv(sh
, percpu
));
697 if (unlikely(count
== 1))
698 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
700 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
705 /* set_syndrome_sources - populate source buffers for gen_syndrome
706 * @srcs - (struct page *) array of size sh->disks
707 * @sh - stripe_head to parse
709 * Populates srcs in proper layout order for the stripe and returns the
710 * 'count' of sources to be used in a call to async_gen_syndrome. The P
711 * destination buffer is recorded in srcs[count] and the Q destination
712 * is recorded in srcs[count+1]].
714 static int set_syndrome_sources(struct page
**srcs
, struct stripe_head
*sh
)
716 int disks
= sh
->disks
;
717 int syndrome_disks
= sh
->ddf_layout
? disks
: (disks
- 2);
718 int d0_idx
= raid6_d0(sh
);
722 for (i
= 0; i
< disks
; i
++)
728 int slot
= raid6_idx_to_slot(i
, sh
, &count
, syndrome_disks
);
730 srcs
[slot
] = sh
->dev
[i
].page
;
731 i
= raid6_next_disk(i
, disks
);
732 } while (i
!= d0_idx
);
734 return syndrome_disks
;
737 static struct dma_async_tx_descriptor
*
738 ops_run_compute6_1(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
740 int disks
= sh
->disks
;
741 struct page
**blocks
= percpu
->scribble
;
743 int qd_idx
= sh
->qd_idx
;
744 struct dma_async_tx_descriptor
*tx
;
745 struct async_submit_ctl submit
;
751 if (sh
->ops
.target
< 0)
752 target
= sh
->ops
.target2
;
753 else if (sh
->ops
.target2
< 0)
754 target
= sh
->ops
.target
;
756 /* we should only have one valid target */
759 pr_debug("%s: stripe %llu block: %d\n",
760 __func__
, (unsigned long long)sh
->sector
, target
);
762 tgt
= &sh
->dev
[target
];
763 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
766 atomic_inc(&sh
->count
);
768 if (target
== qd_idx
) {
769 count
= set_syndrome_sources(blocks
, sh
);
770 blocks
[count
] = NULL
; /* regenerating p is not necessary */
771 BUG_ON(blocks
[count
+1] != dest
); /* q should already be set */
772 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
773 ops_complete_compute
, sh
,
774 to_addr_conv(sh
, percpu
));
775 tx
= async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
777 /* Compute any data- or p-drive using XOR */
779 for (i
= disks
; i
-- ; ) {
780 if (i
== target
|| i
== qd_idx
)
782 blocks
[count
++] = sh
->dev
[i
].page
;
785 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
786 NULL
, ops_complete_compute
, sh
,
787 to_addr_conv(sh
, percpu
));
788 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
, &submit
);
794 static struct dma_async_tx_descriptor
*
795 ops_run_compute6_2(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
797 int i
, count
, disks
= sh
->disks
;
798 int syndrome_disks
= sh
->ddf_layout
? disks
: disks
-2;
799 int d0_idx
= raid6_d0(sh
);
800 int faila
= -1, failb
= -1;
801 int target
= sh
->ops
.target
;
802 int target2
= sh
->ops
.target2
;
803 struct r5dev
*tgt
= &sh
->dev
[target
];
804 struct r5dev
*tgt2
= &sh
->dev
[target2
];
805 struct dma_async_tx_descriptor
*tx
;
806 struct page
**blocks
= percpu
->scribble
;
807 struct async_submit_ctl submit
;
809 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
810 __func__
, (unsigned long long)sh
->sector
, target
, target2
);
811 BUG_ON(target
< 0 || target2
< 0);
812 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
813 BUG_ON(!test_bit(R5_Wantcompute
, &tgt2
->flags
));
815 /* we need to open-code set_syndrome_sources to handle the
816 * slot number conversion for 'faila' and 'failb'
818 for (i
= 0; i
< disks
; i
++)
823 int slot
= raid6_idx_to_slot(i
, sh
, &count
, syndrome_disks
);
825 blocks
[slot
] = sh
->dev
[i
].page
;
831 i
= raid6_next_disk(i
, disks
);
832 } while (i
!= d0_idx
);
834 BUG_ON(faila
== failb
);
837 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
838 __func__
, (unsigned long long)sh
->sector
, faila
, failb
);
840 atomic_inc(&sh
->count
);
842 if (failb
== syndrome_disks
+1) {
843 /* Q disk is one of the missing disks */
844 if (faila
== syndrome_disks
) {
845 /* Missing P+Q, just recompute */
846 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
847 ops_complete_compute
, sh
,
848 to_addr_conv(sh
, percpu
));
849 return async_gen_syndrome(blocks
, 0, syndrome_disks
+2,
850 STRIPE_SIZE
, &submit
);
854 int qd_idx
= sh
->qd_idx
;
856 /* Missing D+Q: recompute D from P, then recompute Q */
857 if (target
== qd_idx
)
858 data_target
= target2
;
860 data_target
= target
;
863 for (i
= disks
; i
-- ; ) {
864 if (i
== data_target
|| i
== qd_idx
)
866 blocks
[count
++] = sh
->dev
[i
].page
;
868 dest
= sh
->dev
[data_target
].page
;
869 init_async_submit(&submit
,
870 ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
872 to_addr_conv(sh
, percpu
));
873 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
,
876 count
= set_syndrome_sources(blocks
, sh
);
877 init_async_submit(&submit
, ASYNC_TX_FENCE
, tx
,
878 ops_complete_compute
, sh
,
879 to_addr_conv(sh
, percpu
));
880 return async_gen_syndrome(blocks
, 0, count
+2,
881 STRIPE_SIZE
, &submit
);
884 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
885 ops_complete_compute
, sh
,
886 to_addr_conv(sh
, percpu
));
887 if (failb
== syndrome_disks
) {
888 /* We're missing D+P. */
889 return async_raid6_datap_recov(syndrome_disks
+2,
893 /* We're missing D+D. */
894 return async_raid6_2data_recov(syndrome_disks
+2,
895 STRIPE_SIZE
, faila
, failb
,
902 static void ops_complete_prexor(void *stripe_head_ref
)
904 struct stripe_head
*sh
= stripe_head_ref
;
906 pr_debug("%s: stripe %llu\n", __func__
,
907 (unsigned long long)sh
->sector
);
910 static struct dma_async_tx_descriptor
*
911 ops_run_prexor(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
912 struct dma_async_tx_descriptor
*tx
)
914 int disks
= sh
->disks
;
915 struct page
**xor_srcs
= percpu
->scribble
;
916 int count
= 0, pd_idx
= sh
->pd_idx
, i
;
917 struct async_submit_ctl submit
;
919 /* existing parity data subtracted */
920 struct page
*xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
922 pr_debug("%s: stripe %llu\n", __func__
,
923 (unsigned long long)sh
->sector
);
925 for (i
= disks
; i
--; ) {
926 struct r5dev
*dev
= &sh
->dev
[i
];
927 /* Only process blocks that are known to be uptodate */
928 if (test_bit(R5_Wantdrain
, &dev
->flags
))
929 xor_srcs
[count
++] = dev
->page
;
932 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_DROP_DST
, tx
,
933 ops_complete_prexor
, sh
, to_addr_conv(sh
, percpu
));
934 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
939 static struct dma_async_tx_descriptor
*
940 ops_run_biodrain(struct stripe_head
*sh
, struct dma_async_tx_descriptor
*tx
)
942 int disks
= sh
->disks
;
945 pr_debug("%s: stripe %llu\n", __func__
,
946 (unsigned long long)sh
->sector
);
948 for (i
= disks
; i
--; ) {
949 struct r5dev
*dev
= &sh
->dev
[i
];
952 if (test_and_clear_bit(R5_Wantdrain
, &dev
->flags
)) {
955 spin_lock(&sh
->lock
);
956 chosen
= dev
->towrite
;
958 BUG_ON(dev
->written
);
959 wbi
= dev
->written
= chosen
;
960 spin_unlock(&sh
->lock
);
962 while (wbi
&& wbi
->bi_sector
<
963 dev
->sector
+ STRIPE_SECTORS
) {
964 tx
= async_copy_data(1, wbi
, dev
->page
,
966 wbi
= r5_next_bio(wbi
, dev
->sector
);
974 static void ops_complete_reconstruct(void *stripe_head_ref
)
976 struct stripe_head
*sh
= stripe_head_ref
;
977 int disks
= sh
->disks
;
978 int pd_idx
= sh
->pd_idx
;
979 int qd_idx
= sh
->qd_idx
;
982 pr_debug("%s: stripe %llu\n", __func__
,
983 (unsigned long long)sh
->sector
);
985 for (i
= disks
; i
--; ) {
986 struct r5dev
*dev
= &sh
->dev
[i
];
988 if (dev
->written
|| i
== pd_idx
|| i
== qd_idx
)
989 set_bit(R5_UPTODATE
, &dev
->flags
);
992 if (sh
->reconstruct_state
== reconstruct_state_drain_run
)
993 sh
->reconstruct_state
= reconstruct_state_drain_result
;
994 else if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
)
995 sh
->reconstruct_state
= reconstruct_state_prexor_drain_result
;
997 BUG_ON(sh
->reconstruct_state
!= reconstruct_state_run
);
998 sh
->reconstruct_state
= reconstruct_state_result
;
1001 set_bit(STRIPE_HANDLE
, &sh
->state
);
1006 ops_run_reconstruct5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1007 struct dma_async_tx_descriptor
*tx
)
1009 int disks
= sh
->disks
;
1010 struct page
**xor_srcs
= percpu
->scribble
;
1011 struct async_submit_ctl submit
;
1012 int count
= 0, pd_idx
= sh
->pd_idx
, i
;
1013 struct page
*xor_dest
;
1015 unsigned long flags
;
1017 pr_debug("%s: stripe %llu\n", __func__
,
1018 (unsigned long long)sh
->sector
);
1020 /* check if prexor is active which means only process blocks
1021 * that are part of a read-modify-write (written)
1023 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
) {
1025 xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
1026 for (i
= disks
; i
--; ) {
1027 struct r5dev
*dev
= &sh
->dev
[i
];
1029 xor_srcs
[count
++] = dev
->page
;
1032 xor_dest
= sh
->dev
[pd_idx
].page
;
1033 for (i
= disks
; i
--; ) {
1034 struct r5dev
*dev
= &sh
->dev
[i
];
1036 xor_srcs
[count
++] = dev
->page
;
1040 /* 1/ if we prexor'd then the dest is reused as a source
1041 * 2/ if we did not prexor then we are redoing the parity
1042 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1043 * for the synchronous xor case
1045 flags
= ASYNC_TX_ACK
|
1046 (prexor
? ASYNC_TX_XOR_DROP_DST
: ASYNC_TX_XOR_ZERO_DST
);
1048 atomic_inc(&sh
->count
);
1050 init_async_submit(&submit
, flags
, tx
, ops_complete_reconstruct
, sh
,
1051 to_addr_conv(sh
, percpu
));
1052 if (unlikely(count
== 1))
1053 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
1055 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
1059 ops_run_reconstruct6(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1060 struct dma_async_tx_descriptor
*tx
)
1062 struct async_submit_ctl submit
;
1063 struct page
**blocks
= percpu
->scribble
;
1066 pr_debug("%s: stripe %llu\n", __func__
, (unsigned long long)sh
->sector
);
1068 count
= set_syndrome_sources(blocks
, sh
);
1070 atomic_inc(&sh
->count
);
1072 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_reconstruct
,
1073 sh
, to_addr_conv(sh
, percpu
));
1074 async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
1077 static void ops_complete_check(void *stripe_head_ref
)
1079 struct stripe_head
*sh
= stripe_head_ref
;
1081 pr_debug("%s: stripe %llu\n", __func__
,
1082 (unsigned long long)sh
->sector
);
1084 sh
->check_state
= check_state_check_result
;
1085 set_bit(STRIPE_HANDLE
, &sh
->state
);
1089 static void ops_run_check_p(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1091 int disks
= sh
->disks
;
1092 int pd_idx
= sh
->pd_idx
;
1093 int qd_idx
= sh
->qd_idx
;
1094 struct page
*xor_dest
;
1095 struct page
**xor_srcs
= percpu
->scribble
;
1096 struct dma_async_tx_descriptor
*tx
;
1097 struct async_submit_ctl submit
;
1101 pr_debug("%s: stripe %llu\n", __func__
,
1102 (unsigned long long)sh
->sector
);
1105 xor_dest
= sh
->dev
[pd_idx
].page
;
1106 xor_srcs
[count
++] = xor_dest
;
1107 for (i
= disks
; i
--; ) {
1108 if (i
== pd_idx
|| i
== qd_idx
)
1110 xor_srcs
[count
++] = sh
->dev
[i
].page
;
1113 init_async_submit(&submit
, 0, NULL
, NULL
, NULL
,
1114 to_addr_conv(sh
, percpu
));
1115 tx
= async_xor_val(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
,
1116 &sh
->ops
.zero_sum_result
, &submit
);
1118 atomic_inc(&sh
->count
);
1119 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_check
, sh
, NULL
);
1120 tx
= async_trigger_callback(&submit
);
1123 static void ops_run_check_pq(struct stripe_head
*sh
, struct raid5_percpu
*percpu
, int checkp
)
1125 struct page
**srcs
= percpu
->scribble
;
1126 struct async_submit_ctl submit
;
1129 pr_debug("%s: stripe %llu checkp: %d\n", __func__
,
1130 (unsigned long long)sh
->sector
, checkp
);
1132 count
= set_syndrome_sources(srcs
, sh
);
1136 atomic_inc(&sh
->count
);
1137 init_async_submit(&submit
, ASYNC_TX_ACK
, NULL
, ops_complete_check
,
1138 sh
, to_addr_conv(sh
, percpu
));
1139 async_syndrome_val(srcs
, 0, count
+2, STRIPE_SIZE
,
1140 &sh
->ops
.zero_sum_result
, percpu
->spare_page
, &submit
);
1143 static void __raid_run_ops(struct stripe_head
*sh
, unsigned long ops_request
)
1145 int overlap_clear
= 0, i
, disks
= sh
->disks
;
1146 struct dma_async_tx_descriptor
*tx
= NULL
;
1147 raid5_conf_t
*conf
= sh
->raid_conf
;
1148 int level
= conf
->level
;
1149 struct raid5_percpu
*percpu
;
1153 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
1154 if (test_bit(STRIPE_OP_BIOFILL
, &ops_request
)) {
1155 ops_run_biofill(sh
);
1159 if (test_bit(STRIPE_OP_COMPUTE_BLK
, &ops_request
)) {
1161 tx
= ops_run_compute5(sh
, percpu
);
1163 if (sh
->ops
.target2
< 0 || sh
->ops
.target
< 0)
1164 tx
= ops_run_compute6_1(sh
, percpu
);
1166 tx
= ops_run_compute6_2(sh
, percpu
);
1168 /* terminate the chain if reconstruct is not set to be run */
1169 if (tx
&& !test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
))
1173 if (test_bit(STRIPE_OP_PREXOR
, &ops_request
))
1174 tx
= ops_run_prexor(sh
, percpu
, tx
);
1176 if (test_bit(STRIPE_OP_BIODRAIN
, &ops_request
)) {
1177 tx
= ops_run_biodrain(sh
, tx
);
1181 if (test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
)) {
1183 ops_run_reconstruct5(sh
, percpu
, tx
);
1185 ops_run_reconstruct6(sh
, percpu
, tx
);
1188 if (test_bit(STRIPE_OP_CHECK
, &ops_request
)) {
1189 if (sh
->check_state
== check_state_run
)
1190 ops_run_check_p(sh
, percpu
);
1191 else if (sh
->check_state
== check_state_run_q
)
1192 ops_run_check_pq(sh
, percpu
, 0);
1193 else if (sh
->check_state
== check_state_run_pq
)
1194 ops_run_check_pq(sh
, percpu
, 1);
1200 for (i
= disks
; i
--; ) {
1201 struct r5dev
*dev
= &sh
->dev
[i
];
1202 if (test_and_clear_bit(R5_Overlap
, &dev
->flags
))
1203 wake_up(&sh
->raid_conf
->wait_for_overlap
);
1208 #ifdef CONFIG_MULTICORE_RAID456
1209 static void async_run_ops(void *param
, async_cookie_t cookie
)
1211 struct stripe_head
*sh
= param
;
1212 unsigned long ops_request
= sh
->ops
.request
;
1214 clear_bit_unlock(STRIPE_OPS_REQ_PENDING
, &sh
->state
);
1215 wake_up(&sh
->ops
.wait_for_ops
);
1217 __raid_run_ops(sh
, ops_request
);
1221 static void raid_run_ops(struct stripe_head
*sh
, unsigned long ops_request
)
1223 /* since handle_stripe can be called outside of raid5d context
1224 * we need to ensure sh->ops.request is de-staged before another
1227 wait_event(sh
->ops
.wait_for_ops
,
1228 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING
, &sh
->state
));
1229 sh
->ops
.request
= ops_request
;
1231 atomic_inc(&sh
->count
);
1232 async_schedule(async_run_ops
, sh
);
1235 #define raid_run_ops __raid_run_ops
1238 static int grow_one_stripe(raid5_conf_t
*conf
)
1240 struct stripe_head
*sh
;
1241 int disks
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
1242 sh
= kmem_cache_alloc(conf
->slab_cache
, GFP_KERNEL
);
1245 memset(sh
, 0, sizeof(*sh
) + (disks
-1)*sizeof(struct r5dev
));
1246 sh
->raid_conf
= conf
;
1247 spin_lock_init(&sh
->lock
);
1248 #ifdef CONFIG_MULTICORE_RAID456
1249 init_waitqueue_head(&sh
->ops
.wait_for_ops
);
1252 if (grow_buffers(sh
, disks
)) {
1253 shrink_buffers(sh
, disks
);
1254 kmem_cache_free(conf
->slab_cache
, sh
);
1257 /* we just created an active stripe so... */
1258 atomic_set(&sh
->count
, 1);
1259 atomic_inc(&conf
->active_stripes
);
1260 INIT_LIST_HEAD(&sh
->lru
);
1265 static int grow_stripes(raid5_conf_t
*conf
, int num
)
1267 struct kmem_cache
*sc
;
1268 int devs
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
1270 sprintf(conf
->cache_name
[0],
1271 "raid%d-%s", conf
->level
, mdname(conf
->mddev
));
1272 sprintf(conf
->cache_name
[1],
1273 "raid%d-%s-alt", conf
->level
, mdname(conf
->mddev
));
1274 conf
->active_name
= 0;
1275 sc
= kmem_cache_create(conf
->cache_name
[conf
->active_name
],
1276 sizeof(struct stripe_head
)+(devs
-1)*sizeof(struct r5dev
),
1280 conf
->slab_cache
= sc
;
1281 conf
->pool_size
= devs
;
1283 if (!grow_one_stripe(conf
))
1289 * scribble_len - return the required size of the scribble region
1290 * @num - total number of disks in the array
1292 * The size must be enough to contain:
1293 * 1/ a struct page pointer for each device in the array +2
1294 * 2/ room to convert each entry in (1) to its corresponding dma
1295 * (dma_map_page()) or page (page_address()) address.
1297 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1298 * calculate over all devices (not just the data blocks), using zeros in place
1299 * of the P and Q blocks.
1301 static size_t scribble_len(int num
)
1305 len
= sizeof(struct page
*) * (num
+2) + sizeof(addr_conv_t
) * (num
+2);
1310 static int resize_stripes(raid5_conf_t
*conf
, int newsize
)
1312 /* Make all the stripes able to hold 'newsize' devices.
1313 * New slots in each stripe get 'page' set to a new page.
1315 * This happens in stages:
1316 * 1/ create a new kmem_cache and allocate the required number of
1318 * 2/ gather all the old stripe_heads and tranfer the pages across
1319 * to the new stripe_heads. This will have the side effect of
1320 * freezing the array as once all stripe_heads have been collected,
1321 * no IO will be possible. Old stripe heads are freed once their
1322 * pages have been transferred over, and the old kmem_cache is
1323 * freed when all stripes are done.
1324 * 3/ reallocate conf->disks to be suitable bigger. If this fails,
1325 * we simple return a failre status - no need to clean anything up.
1326 * 4/ allocate new pages for the new slots in the new stripe_heads.
1327 * If this fails, we don't bother trying the shrink the
1328 * stripe_heads down again, we just leave them as they are.
1329 * As each stripe_head is processed the new one is released into
1332 * Once step2 is started, we cannot afford to wait for a write,
1333 * so we use GFP_NOIO allocations.
1335 struct stripe_head
*osh
, *nsh
;
1336 LIST_HEAD(newstripes
);
1337 struct disk_info
*ndisks
;
1340 struct kmem_cache
*sc
;
1343 if (newsize
<= conf
->pool_size
)
1344 return 0; /* never bother to shrink */
1346 err
= md_allow_write(conf
->mddev
);
1351 sc
= kmem_cache_create(conf
->cache_name
[1-conf
->active_name
],
1352 sizeof(struct stripe_head
)+(newsize
-1)*sizeof(struct r5dev
),
1357 for (i
= conf
->max_nr_stripes
; i
; i
--) {
1358 nsh
= kmem_cache_alloc(sc
, GFP_KERNEL
);
1362 memset(nsh
, 0, sizeof(*nsh
) + (newsize
-1)*sizeof(struct r5dev
));
1364 nsh
->raid_conf
= conf
;
1365 spin_lock_init(&nsh
->lock
);
1366 #ifdef CONFIG_MULTICORE_RAID456
1367 init_waitqueue_head(&nsh
->ops
.wait_for_ops
);
1370 list_add(&nsh
->lru
, &newstripes
);
1373 /* didn't get enough, give up */
1374 while (!list_empty(&newstripes
)) {
1375 nsh
= list_entry(newstripes
.next
, struct stripe_head
, lru
);
1376 list_del(&nsh
->lru
);
1377 kmem_cache_free(sc
, nsh
);
1379 kmem_cache_destroy(sc
);
1382 /* Step 2 - Must use GFP_NOIO now.
1383 * OK, we have enough stripes, start collecting inactive
1384 * stripes and copying them over
1386 list_for_each_entry(nsh
, &newstripes
, lru
) {
1387 spin_lock_irq(&conf
->device_lock
);
1388 wait_event_lock_irq(conf
->wait_for_stripe
,
1389 !list_empty(&conf
->inactive_list
),
1391 unplug_slaves(conf
->mddev
)
1393 osh
= get_free_stripe(conf
);
1394 spin_unlock_irq(&conf
->device_lock
);
1395 atomic_set(&nsh
->count
, 1);
1396 for(i
=0; i
<conf
->pool_size
; i
++)
1397 nsh
->dev
[i
].page
= osh
->dev
[i
].page
;
1398 for( ; i
<newsize
; i
++)
1399 nsh
->dev
[i
].page
= NULL
;
1400 kmem_cache_free(conf
->slab_cache
, osh
);
1402 kmem_cache_destroy(conf
->slab_cache
);
1405 * At this point, we are holding all the stripes so the array
1406 * is completely stalled, so now is a good time to resize
1407 * conf->disks and the scribble region
1409 ndisks
= kzalloc(newsize
* sizeof(struct disk_info
), GFP_NOIO
);
1411 for (i
=0; i
<conf
->raid_disks
; i
++)
1412 ndisks
[i
] = conf
->disks
[i
];
1414 conf
->disks
= ndisks
;
1419 conf
->scribble_len
= scribble_len(newsize
);
1420 for_each_present_cpu(cpu
) {
1421 struct raid5_percpu
*percpu
;
1424 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
1425 scribble
= kmalloc(conf
->scribble_len
, GFP_NOIO
);
1428 kfree(percpu
->scribble
);
1429 percpu
->scribble
= scribble
;
1437 /* Step 4, return new stripes to service */
1438 while(!list_empty(&newstripes
)) {
1439 nsh
= list_entry(newstripes
.next
, struct stripe_head
, lru
);
1440 list_del_init(&nsh
->lru
);
1442 for (i
=conf
->raid_disks
; i
< newsize
; i
++)
1443 if (nsh
->dev
[i
].page
== NULL
) {
1444 struct page
*p
= alloc_page(GFP_NOIO
);
1445 nsh
->dev
[i
].page
= p
;
1449 release_stripe(nsh
);
1451 /* critical section pass, GFP_NOIO no longer needed */
1453 conf
->slab_cache
= sc
;
1454 conf
->active_name
= 1-conf
->active_name
;
1455 conf
->pool_size
= newsize
;
1459 static int drop_one_stripe(raid5_conf_t
*conf
)
1461 struct stripe_head
*sh
;
1463 spin_lock_irq(&conf
->device_lock
);
1464 sh
= get_free_stripe(conf
);
1465 spin_unlock_irq(&conf
->device_lock
);
1468 BUG_ON(atomic_read(&sh
->count
));
1469 shrink_buffers(sh
, conf
->pool_size
);
1470 kmem_cache_free(conf
->slab_cache
, sh
);
1471 atomic_dec(&conf
->active_stripes
);
1475 static void shrink_stripes(raid5_conf_t
*conf
)
1477 while (drop_one_stripe(conf
))
1480 if (conf
->slab_cache
)
1481 kmem_cache_destroy(conf
->slab_cache
);
1482 conf
->slab_cache
= NULL
;
1485 static void raid5_end_read_request(struct bio
* bi
, int error
)
1487 struct stripe_head
*sh
= bi
->bi_private
;
1488 raid5_conf_t
*conf
= sh
->raid_conf
;
1489 int disks
= sh
->disks
, i
;
1490 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
1491 char b
[BDEVNAME_SIZE
];
1495 for (i
=0 ; i
<disks
; i
++)
1496 if (bi
== &sh
->dev
[i
].req
)
1499 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1500 (unsigned long long)sh
->sector
, i
, atomic_read(&sh
->count
),
1508 set_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
1509 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
)) {
1510 rdev
= conf
->disks
[i
].rdev
;
1511 printk_rl(KERN_INFO
"raid5:%s: read error corrected"
1512 " (%lu sectors at %llu on %s)\n",
1513 mdname(conf
->mddev
), STRIPE_SECTORS
,
1514 (unsigned long long)(sh
->sector
1515 + rdev
->data_offset
),
1516 bdevname(rdev
->bdev
, b
));
1517 clear_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1518 clear_bit(R5_ReWrite
, &sh
->dev
[i
].flags
);
1520 if (atomic_read(&conf
->disks
[i
].rdev
->read_errors
))
1521 atomic_set(&conf
->disks
[i
].rdev
->read_errors
, 0);
1523 const char *bdn
= bdevname(conf
->disks
[i
].rdev
->bdev
, b
);
1525 rdev
= conf
->disks
[i
].rdev
;
1527 clear_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
1528 atomic_inc(&rdev
->read_errors
);
1529 if (conf
->mddev
->degraded
>= conf
->max_degraded
)
1530 printk_rl(KERN_WARNING
1531 "raid5:%s: read error not correctable "
1532 "(sector %llu on %s).\n",
1533 mdname(conf
->mddev
),
1534 (unsigned long long)(sh
->sector
1535 + rdev
->data_offset
),
1537 else if (test_bit(R5_ReWrite
, &sh
->dev
[i
].flags
))
1539 printk_rl(KERN_WARNING
1540 "raid5:%s: read error NOT corrected!! "
1541 "(sector %llu on %s).\n",
1542 mdname(conf
->mddev
),
1543 (unsigned long long)(sh
->sector
1544 + rdev
->data_offset
),
1546 else if (atomic_read(&rdev
->read_errors
)
1547 > conf
->max_nr_stripes
)
1549 "raid5:%s: Too many read errors, failing device %s.\n",
1550 mdname(conf
->mddev
), bdn
);
1554 set_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1556 clear_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1557 clear_bit(R5_ReWrite
, &sh
->dev
[i
].flags
);
1558 md_error(conf
->mddev
, rdev
);
1561 rdev_dec_pending(conf
->disks
[i
].rdev
, conf
->mddev
);
1562 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
1563 set_bit(STRIPE_HANDLE
, &sh
->state
);
1567 static void raid5_end_write_request(struct bio
*bi
, int error
)
1569 struct stripe_head
*sh
= bi
->bi_private
;
1570 raid5_conf_t
*conf
= sh
->raid_conf
;
1571 int disks
= sh
->disks
, i
;
1572 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
1574 for (i
=0 ; i
<disks
; i
++)
1575 if (bi
== &sh
->dev
[i
].req
)
1578 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1579 (unsigned long long)sh
->sector
, i
, atomic_read(&sh
->count
),
1587 md_error(conf
->mddev
, conf
->disks
[i
].rdev
);
1589 rdev_dec_pending(conf
->disks
[i
].rdev
, conf
->mddev
);
1591 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
1592 set_bit(STRIPE_HANDLE
, &sh
->state
);
1597 static sector_t
compute_blocknr(struct stripe_head
*sh
, int i
, int previous
);
1599 static void raid5_build_block(struct stripe_head
*sh
, int i
, int previous
)
1601 struct r5dev
*dev
= &sh
->dev
[i
];
1603 bio_init(&dev
->req
);
1604 dev
->req
.bi_io_vec
= &dev
->vec
;
1606 dev
->req
.bi_max_vecs
++;
1607 dev
->vec
.bv_page
= dev
->page
;
1608 dev
->vec
.bv_len
= STRIPE_SIZE
;
1609 dev
->vec
.bv_offset
= 0;
1611 dev
->req
.bi_sector
= sh
->sector
;
1612 dev
->req
.bi_private
= sh
;
1615 dev
->sector
= compute_blocknr(sh
, i
, previous
);
1618 static void error(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
1620 char b
[BDEVNAME_SIZE
];
1621 raid5_conf_t
*conf
= (raid5_conf_t
*) mddev
->private;
1622 pr_debug("raid5: error called\n");
1624 if (!test_bit(Faulty
, &rdev
->flags
)) {
1625 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
1626 if (test_and_clear_bit(In_sync
, &rdev
->flags
)) {
1627 unsigned long flags
;
1628 spin_lock_irqsave(&conf
->device_lock
, flags
);
1630 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1632 * if recovery was running, make sure it aborts.
1634 set_bit(MD_RECOVERY_INTR
, &mddev
->recovery
);
1636 set_bit(Faulty
, &rdev
->flags
);
1638 "raid5: Disk failure on %s, disabling device.\n"
1639 "raid5: Operation continuing on %d devices.\n",
1640 bdevname(rdev
->bdev
,b
), conf
->raid_disks
- mddev
->degraded
);
1645 * Input: a 'big' sector number,
1646 * Output: index of the data and parity disk, and the sector # in them.
1648 static sector_t
raid5_compute_sector(raid5_conf_t
*conf
, sector_t r_sector
,
1649 int previous
, int *dd_idx
,
1650 struct stripe_head
*sh
)
1652 sector_t stripe
, stripe2
;
1653 sector_t chunk_number
;
1654 unsigned int chunk_offset
;
1657 sector_t new_sector
;
1658 int algorithm
= previous
? conf
->prev_algo
1660 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
1661 : conf
->chunk_sectors
;
1662 int raid_disks
= previous
? conf
->previous_raid_disks
1664 int data_disks
= raid_disks
- conf
->max_degraded
;
1666 /* First compute the information on this sector */
1669 * Compute the chunk number and the sector offset inside the chunk
1671 chunk_offset
= sector_div(r_sector
, sectors_per_chunk
);
1672 chunk_number
= r_sector
;
1675 * Compute the stripe number
1677 stripe
= chunk_number
;
1678 *dd_idx
= sector_div(stripe
, data_disks
);
1681 * Select the parity disk based on the user selected algorithm.
1683 pd_idx
= qd_idx
= ~0;
1684 switch(conf
->level
) {
1686 pd_idx
= data_disks
;
1689 switch (algorithm
) {
1690 case ALGORITHM_LEFT_ASYMMETRIC
:
1691 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
);
1692 if (*dd_idx
>= pd_idx
)
1695 case ALGORITHM_RIGHT_ASYMMETRIC
:
1696 pd_idx
= sector_div(stripe2
, raid_disks
);
1697 if (*dd_idx
>= pd_idx
)
1700 case ALGORITHM_LEFT_SYMMETRIC
:
1701 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
);
1702 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1704 case ALGORITHM_RIGHT_SYMMETRIC
:
1705 pd_idx
= sector_div(stripe2
, raid_disks
);
1706 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1708 case ALGORITHM_PARITY_0
:
1712 case ALGORITHM_PARITY_N
:
1713 pd_idx
= data_disks
;
1716 printk(KERN_ERR
"raid5: unsupported algorithm %d\n",
1723 switch (algorithm
) {
1724 case ALGORITHM_LEFT_ASYMMETRIC
:
1725 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1726 qd_idx
= pd_idx
+ 1;
1727 if (pd_idx
== raid_disks
-1) {
1728 (*dd_idx
)++; /* Q D D D P */
1730 } else if (*dd_idx
>= pd_idx
)
1731 (*dd_idx
) += 2; /* D D P Q D */
1733 case ALGORITHM_RIGHT_ASYMMETRIC
:
1734 pd_idx
= sector_div(stripe2
, raid_disks
);
1735 qd_idx
= pd_idx
+ 1;
1736 if (pd_idx
== raid_disks
-1) {
1737 (*dd_idx
)++; /* Q D D D P */
1739 } else if (*dd_idx
>= pd_idx
)
1740 (*dd_idx
) += 2; /* D D P Q D */
1742 case ALGORITHM_LEFT_SYMMETRIC
:
1743 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1744 qd_idx
= (pd_idx
+ 1) % raid_disks
;
1745 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
1747 case ALGORITHM_RIGHT_SYMMETRIC
:
1748 pd_idx
= sector_div(stripe2
, raid_disks
);
1749 qd_idx
= (pd_idx
+ 1) % raid_disks
;
1750 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
1753 case ALGORITHM_PARITY_0
:
1758 case ALGORITHM_PARITY_N
:
1759 pd_idx
= data_disks
;
1760 qd_idx
= data_disks
+ 1;
1763 case ALGORITHM_ROTATING_ZERO_RESTART
:
1764 /* Exactly the same as RIGHT_ASYMMETRIC, but or
1765 * of blocks for computing Q is different.
1767 pd_idx
= sector_div(stripe2
, raid_disks
);
1768 qd_idx
= pd_idx
+ 1;
1769 if (pd_idx
== raid_disks
-1) {
1770 (*dd_idx
)++; /* Q D D D P */
1772 } else if (*dd_idx
>= pd_idx
)
1773 (*dd_idx
) += 2; /* D D P Q D */
1777 case ALGORITHM_ROTATING_N_RESTART
:
1778 /* Same a left_asymmetric, by first stripe is
1779 * D D D P Q rather than
1783 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1784 qd_idx
= pd_idx
+ 1;
1785 if (pd_idx
== raid_disks
-1) {
1786 (*dd_idx
)++; /* Q D D D P */
1788 } else if (*dd_idx
>= pd_idx
)
1789 (*dd_idx
) += 2; /* D D P Q D */
1793 case ALGORITHM_ROTATING_N_CONTINUE
:
1794 /* Same as left_symmetric but Q is before P */
1795 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1796 qd_idx
= (pd_idx
+ raid_disks
- 1) % raid_disks
;
1797 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1801 case ALGORITHM_LEFT_ASYMMETRIC_6
:
1802 /* RAID5 left_asymmetric, with Q on last device */
1803 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
-1);
1804 if (*dd_idx
>= pd_idx
)
1806 qd_idx
= raid_disks
- 1;
1809 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
1810 pd_idx
= sector_div(stripe2
, raid_disks
-1);
1811 if (*dd_idx
>= pd_idx
)
1813 qd_idx
= raid_disks
- 1;
1816 case ALGORITHM_LEFT_SYMMETRIC_6
:
1817 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
-1);
1818 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
1819 qd_idx
= raid_disks
- 1;
1822 case ALGORITHM_RIGHT_SYMMETRIC_6
:
1823 pd_idx
= sector_div(stripe2
, raid_disks
-1);
1824 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
1825 qd_idx
= raid_disks
- 1;
1828 case ALGORITHM_PARITY_0_6
:
1831 qd_idx
= raid_disks
- 1;
1836 printk(KERN_CRIT
"raid6: unsupported algorithm %d\n",
1844 sh
->pd_idx
= pd_idx
;
1845 sh
->qd_idx
= qd_idx
;
1846 sh
->ddf_layout
= ddf_layout
;
1849 * Finally, compute the new sector number
1851 new_sector
= (sector_t
)stripe
* sectors_per_chunk
+ chunk_offset
;
1856 static sector_t
compute_blocknr(struct stripe_head
*sh
, int i
, int previous
)
1858 raid5_conf_t
*conf
= sh
->raid_conf
;
1859 int raid_disks
= sh
->disks
;
1860 int data_disks
= raid_disks
- conf
->max_degraded
;
1861 sector_t new_sector
= sh
->sector
, check
;
1862 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
1863 : conf
->chunk_sectors
;
1864 int algorithm
= previous
? conf
->prev_algo
1868 sector_t chunk_number
;
1869 int dummy1
, dd_idx
= i
;
1871 struct stripe_head sh2
;
1874 chunk_offset
= sector_div(new_sector
, sectors_per_chunk
);
1875 stripe
= new_sector
;
1877 if (i
== sh
->pd_idx
)
1879 switch(conf
->level
) {
1882 switch (algorithm
) {
1883 case ALGORITHM_LEFT_ASYMMETRIC
:
1884 case ALGORITHM_RIGHT_ASYMMETRIC
:
1888 case ALGORITHM_LEFT_SYMMETRIC
:
1889 case ALGORITHM_RIGHT_SYMMETRIC
:
1892 i
-= (sh
->pd_idx
+ 1);
1894 case ALGORITHM_PARITY_0
:
1897 case ALGORITHM_PARITY_N
:
1900 printk(KERN_ERR
"raid5: unsupported algorithm %d\n",
1906 if (i
== sh
->qd_idx
)
1907 return 0; /* It is the Q disk */
1908 switch (algorithm
) {
1909 case ALGORITHM_LEFT_ASYMMETRIC
:
1910 case ALGORITHM_RIGHT_ASYMMETRIC
:
1911 case ALGORITHM_ROTATING_ZERO_RESTART
:
1912 case ALGORITHM_ROTATING_N_RESTART
:
1913 if (sh
->pd_idx
== raid_disks
-1)
1914 i
--; /* Q D D D P */
1915 else if (i
> sh
->pd_idx
)
1916 i
-= 2; /* D D P Q D */
1918 case ALGORITHM_LEFT_SYMMETRIC
:
1919 case ALGORITHM_RIGHT_SYMMETRIC
:
1920 if (sh
->pd_idx
== raid_disks
-1)
1921 i
--; /* Q D D D P */
1926 i
-= (sh
->pd_idx
+ 2);
1929 case ALGORITHM_PARITY_0
:
1932 case ALGORITHM_PARITY_N
:
1934 case ALGORITHM_ROTATING_N_CONTINUE
:
1935 /* Like left_symmetric, but P is before Q */
1936 if (sh
->pd_idx
== 0)
1937 i
--; /* P D D D Q */
1942 i
-= (sh
->pd_idx
+ 1);
1945 case ALGORITHM_LEFT_ASYMMETRIC_6
:
1946 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
1950 case ALGORITHM_LEFT_SYMMETRIC_6
:
1951 case ALGORITHM_RIGHT_SYMMETRIC_6
:
1953 i
+= data_disks
+ 1;
1954 i
-= (sh
->pd_idx
+ 1);
1956 case ALGORITHM_PARITY_0_6
:
1960 printk(KERN_CRIT
"raid6: unsupported algorithm %d\n",
1967 chunk_number
= stripe
* data_disks
+ i
;
1968 r_sector
= chunk_number
* sectors_per_chunk
+ chunk_offset
;
1970 check
= raid5_compute_sector(conf
, r_sector
,
1971 previous
, &dummy1
, &sh2
);
1972 if (check
!= sh
->sector
|| dummy1
!= dd_idx
|| sh2
.pd_idx
!= sh
->pd_idx
1973 || sh2
.qd_idx
!= sh
->qd_idx
) {
1974 printk(KERN_ERR
"compute_blocknr: map not correct\n");
1982 schedule_reconstruction(struct stripe_head
*sh
, struct stripe_head_state
*s
,
1983 int rcw
, int expand
)
1985 int i
, pd_idx
= sh
->pd_idx
, disks
= sh
->disks
;
1986 raid5_conf_t
*conf
= sh
->raid_conf
;
1987 int level
= conf
->level
;
1990 /* if we are not expanding this is a proper write request, and
1991 * there will be bios with new data to be drained into the
1995 sh
->reconstruct_state
= reconstruct_state_drain_run
;
1996 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
1998 sh
->reconstruct_state
= reconstruct_state_run
;
2000 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2002 for (i
= disks
; i
--; ) {
2003 struct r5dev
*dev
= &sh
->dev
[i
];
2006 set_bit(R5_LOCKED
, &dev
->flags
);
2007 set_bit(R5_Wantdrain
, &dev
->flags
);
2009 clear_bit(R5_UPTODATE
, &dev
->flags
);
2013 if (s
->locked
+ conf
->max_degraded
== disks
)
2014 if (!test_and_set_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2015 atomic_inc(&conf
->pending_full_writes
);
2018 BUG_ON(!(test_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
) ||
2019 test_bit(R5_Wantcompute
, &sh
->dev
[pd_idx
].flags
)));
2021 sh
->reconstruct_state
= reconstruct_state_prexor_drain_run
;
2022 set_bit(STRIPE_OP_PREXOR
, &s
->ops_request
);
2023 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2024 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2026 for (i
= disks
; i
--; ) {
2027 struct r5dev
*dev
= &sh
->dev
[i
];
2032 (test_bit(R5_UPTODATE
, &dev
->flags
) ||
2033 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2034 set_bit(R5_Wantdrain
, &dev
->flags
);
2035 set_bit(R5_LOCKED
, &dev
->flags
);
2036 clear_bit(R5_UPTODATE
, &dev
->flags
);
2042 /* keep the parity disk(s) locked while asynchronous operations
2045 set_bit(R5_LOCKED
, &sh
->dev
[pd_idx
].flags
);
2046 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2050 int qd_idx
= sh
->qd_idx
;
2051 struct r5dev
*dev
= &sh
->dev
[qd_idx
];
2053 set_bit(R5_LOCKED
, &dev
->flags
);
2054 clear_bit(R5_UPTODATE
, &dev
->flags
);
2058 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2059 __func__
, (unsigned long long)sh
->sector
,
2060 s
->locked
, s
->ops_request
);
2064 * Each stripe/dev can have one or more bion attached.
2065 * toread/towrite point to the first in a chain.
2066 * The bi_next chain must be in order.
2068 static int add_stripe_bio(struct stripe_head
*sh
, struct bio
*bi
, int dd_idx
, int forwrite
)
2071 raid5_conf_t
*conf
= sh
->raid_conf
;
2074 pr_debug("adding bh b#%llu to stripe s#%llu\n",
2075 (unsigned long long)bi
->bi_sector
,
2076 (unsigned long long)sh
->sector
);
2079 spin_lock(&sh
->lock
);
2080 spin_lock_irq(&conf
->device_lock
);
2082 bip
= &sh
->dev
[dd_idx
].towrite
;
2083 if (*bip
== NULL
&& sh
->dev
[dd_idx
].written
== NULL
)
2086 bip
= &sh
->dev
[dd_idx
].toread
;
2087 while (*bip
&& (*bip
)->bi_sector
< bi
->bi_sector
) {
2088 if ((*bip
)->bi_sector
+ ((*bip
)->bi_size
>> 9) > bi
->bi_sector
)
2090 bip
= & (*bip
)->bi_next
;
2092 if (*bip
&& (*bip
)->bi_sector
< bi
->bi_sector
+ ((bi
->bi_size
)>>9))
2095 BUG_ON(*bip
&& bi
->bi_next
&& (*bip
) != bi
->bi_next
);
2099 bi
->bi_phys_segments
++;
2100 spin_unlock_irq(&conf
->device_lock
);
2101 spin_unlock(&sh
->lock
);
2103 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2104 (unsigned long long)bi
->bi_sector
,
2105 (unsigned long long)sh
->sector
, dd_idx
);
2107 if (conf
->mddev
->bitmap
&& firstwrite
) {
2108 bitmap_startwrite(conf
->mddev
->bitmap
, sh
->sector
,
2110 sh
->bm_seq
= conf
->seq_flush
+1;
2111 set_bit(STRIPE_BIT_DELAY
, &sh
->state
);
2115 /* check if page is covered */
2116 sector_t sector
= sh
->dev
[dd_idx
].sector
;
2117 for (bi
=sh
->dev
[dd_idx
].towrite
;
2118 sector
< sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
&&
2119 bi
&& bi
->bi_sector
<= sector
;
2120 bi
= r5_next_bio(bi
, sh
->dev
[dd_idx
].sector
)) {
2121 if (bi
->bi_sector
+ (bi
->bi_size
>>9) >= sector
)
2122 sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
2124 if (sector
>= sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
)
2125 set_bit(R5_OVERWRITE
, &sh
->dev
[dd_idx
].flags
);
2130 set_bit(R5_Overlap
, &sh
->dev
[dd_idx
].flags
);
2131 spin_unlock_irq(&conf
->device_lock
);
2132 spin_unlock(&sh
->lock
);
2136 static void end_reshape(raid5_conf_t
*conf
);
2138 static void stripe_set_idx(sector_t stripe
, raid5_conf_t
*conf
, int previous
,
2139 struct stripe_head
*sh
)
2141 int sectors_per_chunk
=
2142 previous
? conf
->prev_chunk_sectors
: conf
->chunk_sectors
;
2144 int chunk_offset
= sector_div(stripe
, sectors_per_chunk
);
2145 int disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
2147 raid5_compute_sector(conf
,
2148 stripe
* (disks
- conf
->max_degraded
)
2149 *sectors_per_chunk
+ chunk_offset
,
static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
		     struct stripe_head_state *s, int disks,
		     struct bio **return_bi)
{
	int i;

	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
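/*
 * Illustrative note, based on the bi_phys_segments convention described at
 * the top of this file: the low half of bi_phys_segments counts the stripes
 * a bio is still active on, so raid5_dec_bi_phys_segments() returning zero in
 * the loops above means no stripe still references the bio and it is safe to
 * complete it (md_write_end() plus queueing on *return_bi).
 */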
/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
 */
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed &&
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1;
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				 s->syncing);
		}
	}

	return 0;
}
/**
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
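/*
 * Illustrative note: the 's->uptodate == disks - 1' test in fetch_block5()
 * means exactly one block of the stripe is missing, so it can be rebuilt by
 * XOR of the remaining blocks (scheduled via STRIPE_OP_COMPUTE_BLK) instead
 * of being read from a failed or out-of-sync device.
 */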
2319 /* fetch_block6 - checks the given member device to see if its data needs
2320 * to be read or computed to satisfy a request.
2322 * Returns 1 when no more member devices need to be checked, otherwise returns
2323 * 0 to tell the loop in handle_stripe_fill6 to continue
2325 static int fetch_block6(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2326 struct r6_state
*r6s
, int disk_idx
, int disks
)
2328 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
2329 struct r5dev
*fdev
[2] = { &sh
->dev
[r6s
->failed_num
[0]],
2330 &sh
->dev
[r6s
->failed_num
[1]] };
2332 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
2333 !test_bit(R5_UPTODATE
, &dev
->flags
) &&
2335 (dev
->towrite
&& !test_bit(R5_OVERWRITE
, &dev
->flags
)) ||
2336 s
->syncing
|| s
->expanding
||
2338 (fdev
[0]->toread
|| s
->to_write
)) ||
2340 (fdev
[1]->toread
|| s
->to_write
)))) {
2341 /* we would like to get this block, possibly by computing it,
2342 * otherwise read it if the backing disk is insync
2344 BUG_ON(test_bit(R5_Wantcompute
, &dev
->flags
));
2345 BUG_ON(test_bit(R5_Wantread
, &dev
->flags
));
2346 if ((s
->uptodate
== disks
- 1) &&
2347 (s
->failed
&& (disk_idx
== r6s
->failed_num
[0] ||
2348 disk_idx
== r6s
->failed_num
[1]))) {
2349 /* have disk failed, and we're requested to fetch it;
2352 pr_debug("Computing stripe %llu block %d\n",
2353 (unsigned long long)sh
->sector
, disk_idx
);
2354 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2355 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2356 set_bit(R5_Wantcompute
, &dev
->flags
);
2357 sh
->ops
.target
= disk_idx
;
2358 sh
->ops
.target2
= -1; /* no 2nd target */
2362 } else if (s
->uptodate
== disks
-2 && s
->failed
>= 2) {
2363 /* Computing 2-failure is *very* expensive; only
2364 * do it if failed >= 2
2367 for (other
= disks
; other
--; ) {
2368 if (other
== disk_idx
)
2370 if (!test_bit(R5_UPTODATE
,
2371 &sh
->dev
[other
].flags
))
2375 pr_debug("Computing stripe %llu blocks %d,%d\n",
2376 (unsigned long long)sh
->sector
,
2378 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2379 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2380 set_bit(R5_Wantcompute
, &sh
->dev
[disk_idx
].flags
);
2381 set_bit(R5_Wantcompute
, &sh
->dev
[other
].flags
);
2382 sh
->ops
.target
= disk_idx
;
2383 sh
->ops
.target2
= other
;
2387 } else if (test_bit(R5_Insync
, &dev
->flags
)) {
2388 set_bit(R5_LOCKED
, &dev
->flags
);
2389 set_bit(R5_Wantread
, &dev
->flags
);
2391 pr_debug("Reading block %d (sync=%d)\n",
2392 disk_idx
, s
->syncing
);
/**
 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
			int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block6(sh, s, r6s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
						sh->sector, STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
						0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
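/*
 * Illustrative note: a 'written' block can be completed once it is UPTODATE
 * and no longer LOCKED; as the comment above says, blocks "written" to a
 * failed drive are marked UPTODATE but never LOCKED, so writes to degraded
 * stripes complete here as well without a separate 'failed' test.
 */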
static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s, int disks)
{
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks; /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
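/*
 * Worked example for the rmw/rcw choice above (illustrative, assumes a
 * 5-device RAID-5 with a single data block being updated): read-modify-write
 * needs the old copies of the target block and of parity, so rmw counts 2
 * reads; reconstruct-write needs the 3 untouched data blocks, so rcw counts
 * 3.  rmw < rcw, so the prefer-read-modify-write branch is taken.  A block
 * that is not In_sync cannot be read at all, which is why it is charged
 * 2*disks, effectively forcing the other strategy.
 */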
2561 static void handle_stripe_dirtying6(raid5_conf_t
*conf
,
2562 struct stripe_head
*sh
, struct stripe_head_state
*s
,
2563 struct r6_state
*r6s
, int disks
)
2565 int rcw
= 0, pd_idx
= sh
->pd_idx
, i
;
2566 int qd_idx
= sh
->qd_idx
;
2568 set_bit(STRIPE_HANDLE
, &sh
->state
);
2569 for (i
= disks
; i
--; ) {
2570 struct r5dev
*dev
= &sh
->dev
[i
];
2571 /* check if we haven't enough data */
2572 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
2573 i
!= pd_idx
&& i
!= qd_idx
&&
2574 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2575 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2576 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2578 if (!test_bit(R5_Insync
, &dev
->flags
))
2579 continue; /* it's a failed drive */
2582 test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2583 pr_debug("Read_old stripe %llu "
2584 "block %d for Reconstruct\n",
2585 (unsigned long long)sh
->sector
, i
);
2586 set_bit(R5_LOCKED
, &dev
->flags
);
2587 set_bit(R5_Wantread
, &dev
->flags
);
2590 pr_debug("Request delayed stripe %llu "
2591 "block %d for Reconstruct\n",
2592 (unsigned long long)sh
->sector
, i
);
2593 set_bit(STRIPE_DELAYED
, &sh
->state
);
2594 set_bit(STRIPE_HANDLE
, &sh
->state
);
2598 /* now if nothing is locked, and if we have enough data, we can start a
2601 if ((s
->req_compute
|| !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)) &&
2602 s
->locked
== 0 && rcw
== 0 &&
2603 !test_bit(STRIPE_BIT_DELAY
, &sh
->state
)) {
2604 schedule_reconstruction(sh
, s
, 1, 0);
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
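/*
 * Illustrative summary of the check_state machine handled above (not from
 * the original source):
 *   check_state_idle  -> check_state_run          (xor check submitted)
 *   check_state_run   -> check_state_check_result (set when the async check
 *                                                  completes)
 *   check_state_check_result -> idle, or -> check_state_compute_run when the
 *     parity mismatched and repair is allowed (!MD_RECOVERY_CHECK)
 *   check_state_compute_run -> check_state_compute_result -> idle, writing
 *     the recomputed parity back via R5_Wantwrite.
 */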
2696 static void handle_parity_checks6(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2697 struct stripe_head_state
*s
,
2698 struct r6_state
*r6s
, int disks
)
2700 int pd_idx
= sh
->pd_idx
;
2701 int qd_idx
= sh
->qd_idx
;
2704 set_bit(STRIPE_HANDLE
, &sh
->state
);
2706 BUG_ON(s
->failed
> 2);
2708 /* Want to check and possibly repair P and Q.
2709 * However there could be one 'failed' device, in which
2710 * case we can only check one of them, possibly using the
2711 * other to generate missing data
2714 switch (sh
->check_state
) {
2715 case check_state_idle
:
2716 /* start a new check operation if there are < 2 failures */
2717 if (s
->failed
== r6s
->q_failed
) {
2718 /* The only possible failed device holds Q, so it
2719 * makes sense to check P (If anything else were failed,
2720 * we would have used P to recreate it).
2722 sh
->check_state
= check_state_run
;
2724 if (!r6s
->q_failed
&& s
->failed
< 2) {
2725 /* Q is not failed, and we didn't use it to generate
2726 * anything, so it makes sense to check it
2728 if (sh
->check_state
== check_state_run
)
2729 sh
->check_state
= check_state_run_pq
;
2731 sh
->check_state
= check_state_run_q
;
2734 /* discard potentially stale zero_sum_result */
2735 sh
->ops
.zero_sum_result
= 0;
2737 if (sh
->check_state
== check_state_run
) {
2738 /* async_xor_zero_sum destroys the contents of P */
2739 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2742 if (sh
->check_state
>= check_state_run
&&
2743 sh
->check_state
<= check_state_run_pq
) {
2744 /* async_syndrome_zero_sum preserves P and Q, so
2745 * no need to mark them !uptodate here
2747 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
2751 /* we have 2-disk failure */
2752 BUG_ON(s
->failed
!= 2);
2754 case check_state_compute_result
:
2755 sh
->check_state
= check_state_idle
;
2757 /* check that a write has not made the stripe insync */
2758 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
2761 /* now write out any block on a failed drive,
2762 * or P or Q if they were recomputed
2764 BUG_ON(s
->uptodate
< disks
- 1); /* We don't need Q to recover */
2765 if (s
->failed
== 2) {
2766 dev
= &sh
->dev
[r6s
->failed_num
[1]];
2768 set_bit(R5_LOCKED
, &dev
->flags
);
2769 set_bit(R5_Wantwrite
, &dev
->flags
);
2771 if (s
->failed
>= 1) {
2772 dev
= &sh
->dev
[r6s
->failed_num
[0]];
2774 set_bit(R5_LOCKED
, &dev
->flags
);
2775 set_bit(R5_Wantwrite
, &dev
->flags
);
2777 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
2778 dev
= &sh
->dev
[pd_idx
];
2780 set_bit(R5_LOCKED
, &dev
->flags
);
2781 set_bit(R5_Wantwrite
, &dev
->flags
);
2783 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
2784 dev
= &sh
->dev
[qd_idx
];
2786 set_bit(R5_LOCKED
, &dev
->flags
);
2787 set_bit(R5_Wantwrite
, &dev
->flags
);
2789 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
2791 set_bit(STRIPE_INSYNC
, &sh
->state
);
2793 case check_state_run
:
2794 case check_state_run_q
:
2795 case check_state_run_pq
:
2796 break; /* we will be called again upon completion */
2797 case check_state_check_result
:
2798 sh
->check_state
= check_state_idle
;
2800 /* handle a successful check operation, if parity is correct
2801 * we are done. Otherwise update the mismatch count and repair
2802 * parity if !MD_RECOVERY_CHECK
2804 if (sh
->ops
.zero_sum_result
== 0) {
2805 /* both parities are correct */
2807 set_bit(STRIPE_INSYNC
, &sh
->state
);
2809 /* in contrast to the raid5 case we can validate
2810 * parity, but still have a failure to write
2813 sh
->check_state
= check_state_compute_result
;
2814 /* Returning at this point means that we may go
2815 * off and bring p and/or q uptodate again so
2816 * we make sure to check zero_sum_result again
2817 * to verify if p or q need writeback
2821 conf
->mddev
->resync_mismatches
+= STRIPE_SECTORS
;
2822 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
2823 /* don't try to repair!! */
2824 set_bit(STRIPE_INSYNC
, &sh
->state
);
2826 int *target
= &sh
->ops
.target
;
2828 sh
->ops
.target
= -1;
2829 sh
->ops
.target2
= -1;
2830 sh
->check_state
= check_state_compute_run
;
2831 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2832 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2833 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
2834 set_bit(R5_Wantcompute
,
2835 &sh
->dev
[pd_idx
].flags
);
2837 target
= &sh
->ops
.target2
;
2840 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
2841 set_bit(R5_Wantcompute
,
2842 &sh
->dev
[qd_idx
].flags
);
2849 case check_state_compute_run
:
2852 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
2853 __func__
, sh
->check_state
,
2854 (unsigned long long) sh
->sector
);
2859 static void handle_stripe_expansion(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2860 struct r6_state
*r6s
)
2864 /* We have read all the blocks in this stripe and now we need to
2865 * copy some of them into a target stripe for expand.
2867 struct dma_async_tx_descriptor
*tx
= NULL
;
2868 clear_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2869 for (i
= 0; i
< sh
->disks
; i
++)
2870 if (i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
) {
2872 struct stripe_head
*sh2
;
2873 struct async_submit_ctl submit
;
2875 sector_t bn
= compute_blocknr(sh
, i
, 1);
2876 sector_t s
= raid5_compute_sector(conf
, bn
, 0,
2878 sh2
= get_active_stripe(conf
, s
, 0, 1, 1);
2880 /* so far only the early blocks of this stripe
2881 * have been requested. When later blocks
2882 * get requested, we will try again
2885 if (!test_bit(STRIPE_EXPANDING
, &sh2
->state
) ||
2886 test_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
)) {
2887 /* must have already done this block */
2888 release_stripe(sh2
);
2892 /* place all the copies on one channel */
2893 init_async_submit(&submit
, 0, tx
, NULL
, NULL
, NULL
);
2894 tx
= async_memcpy(sh2
->dev
[dd_idx
].page
,
2895 sh
->dev
[i
].page
, 0, 0, STRIPE_SIZE
,
2898 set_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
);
2899 set_bit(R5_UPTODATE
, &sh2
->dev
[dd_idx
].flags
);
2900 for (j
= 0; j
< conf
->raid_disks
; j
++)
2901 if (j
!= sh2
->pd_idx
&&
2902 (!r6s
|| j
!= sh2
->qd_idx
) &&
2903 !test_bit(R5_Expanded
, &sh2
->dev
[j
].flags
))
2905 if (j
== conf
->raid_disks
) {
2906 set_bit(STRIPE_EXPAND_READY
, &sh2
->state
);
2907 set_bit(STRIPE_HANDLE
, &sh2
->state
);
2909 release_stripe(sh2
);
2912 /* done submitting copies, wait for them to complete */
2915 dma_wait_for_async_tx(tx
);
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 */
2937 static void handle_stripe5(struct stripe_head
*sh
)
2939 raid5_conf_t
*conf
= sh
->raid_conf
;
2940 int disks
= sh
->disks
, i
;
2941 struct bio
*return_bi
= NULL
;
2942 struct stripe_head_state s
;
2944 mdk_rdev_t
*blocked_rdev
= NULL
;
2947 memset(&s
, 0, sizeof(s
));
2948 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2949 "reconstruct:%d\n", (unsigned long long)sh
->sector
, sh
->state
,
2950 atomic_read(&sh
->count
), sh
->pd_idx
, sh
->check_state
,
2951 sh
->reconstruct_state
);
2953 spin_lock(&sh
->lock
);
2954 clear_bit(STRIPE_HANDLE
, &sh
->state
);
2955 clear_bit(STRIPE_DELAYED
, &sh
->state
);
2957 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
2958 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2959 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
2961 /* Now to look around and see what can be done */
2963 for (i
=disks
; i
--; ) {
2967 clear_bit(R5_Insync
, &dev
->flags
);
2969 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2970 "written %p\n", i
, dev
->flags
, dev
->toread
, dev
->read
,
2971 dev
->towrite
, dev
->written
);
2973 /* maybe we can request a biofill operation
2975 * new wantfill requests are only permitted while
2976 * ops_complete_biofill is guaranteed to be inactive
2978 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
2979 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
2980 set_bit(R5_Wantfill
, &dev
->flags
);
2982 /* now count some things */
2983 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
2984 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
2985 if (test_bit(R5_Wantcompute
, &dev
->flags
)) s
.compute
++;
2987 if (test_bit(R5_Wantfill
, &dev
->flags
))
2989 else if (dev
->toread
)
2993 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
2998 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
2999 if (blocked_rdev
== NULL
&&
3000 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3001 blocked_rdev
= rdev
;
3002 atomic_inc(&rdev
->nr_pending
);
3004 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3005 /* The ReadError flag will just be confusing now */
3006 clear_bit(R5_ReadError
, &dev
->flags
);
3007 clear_bit(R5_ReWrite
, &dev
->flags
);
3009 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3010 || test_bit(R5_ReadError
, &dev
->flags
)) {
3014 set_bit(R5_Insync
, &dev
->flags
);
3018 if (unlikely(blocked_rdev
)) {
3019 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3020 s
.to_write
|| s
.written
) {
3021 set_bit(STRIPE_HANDLE
, &sh
->state
);
3024 /* There is nothing for the blocked_rdev to block */
3025 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
3026 blocked_rdev
= NULL
;
3029 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3030 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3031 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3034 pr_debug("locked=%d uptodate=%d to_read=%d"
3035 " to_write=%d failed=%d failed_num=%d\n",
3036 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
,
3037 s
.failed
, s
.failed_num
);
3038 /* check if the array has lost two devices and, if so, some requests might
3041 if (s
.failed
> 1 && s
.to_read
+s
.to_write
+s
.written
)
3042 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3043 if (s
.failed
> 1 && s
.syncing
) {
3044 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3045 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3049 /* might be able to return some write requests if the parity block
3050 * is safe, or on a failed drive
3052 dev
= &sh
->dev
[sh
->pd_idx
];
3054 ((test_bit(R5_Insync
, &dev
->flags
) &&
3055 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3056 test_bit(R5_UPTODATE
, &dev
->flags
)) ||
3057 (s
.failed
== 1 && s
.failed_num
== sh
->pd_idx
)))
3058 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3060 /* Now we might consider reading some blocks, either to check/generate
3061 * parity, or to satisfy requests
3062 * or to load a block that is being partially written.
3064 if (s
.to_read
|| s
.non_overwrite
||
3065 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3066 handle_stripe_fill5(sh
, &s
, disks
);
3068 /* Now we check to see if any write operations have recently
3072 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
)
3074 if (sh
->reconstruct_state
== reconstruct_state_drain_result
||
3075 sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
) {
3076 sh
->reconstruct_state
= reconstruct_state_idle
;
3078 /* All the 'written' buffers and the parity block are ready to
3079 * be written back to disk
3081 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3082 for (i
= disks
; i
--; ) {
3084 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3085 (i
== sh
->pd_idx
|| dev
->written
)) {
3086 pr_debug("Writing block %d\n", i
);
3087 set_bit(R5_Wantwrite
, &dev
->flags
);
3090 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3091 (i
== sh
->pd_idx
&& s
.failed
== 0))
3092 set_bit(STRIPE_INSYNC
, &sh
->state
);
3095 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
3096 atomic_dec(&conf
->preread_active_stripes
);
3097 if (atomic_read(&conf
->preread_active_stripes
) <
3099 md_wakeup_thread(conf
->mddev
->thread
);
3103 /* Now to consider new write requests and what else, if anything
3104 * should be read. We do not handle new writes when:
3105 * 1/ A 'write' operation (copy+xor) is already in flight.
3106 * 2/ A 'check' operation is in flight, as it may clobber the parity
3109 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3110 handle_stripe_dirtying5(conf
, sh
, &s
, disks
);
3112 /* maybe we need to check and possibly fix the parity for this stripe
3113 * Any reads will already have been scheduled, so we just see if enough
3114 * data is available. The parity check is held off while parity
3115 * dependent operations are in flight.
3117 if (sh
->check_state
||
3118 (s
.syncing
&& s
.locked
== 0 &&
3119 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3120 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3121 handle_parity_checks5(conf
, sh
, &s
, disks
);
3123 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3124 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3125 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3128 /* If the failed drive is just a ReadError, then we might need to progress
3129 * the repair/check process
3131 if (s
.failed
== 1 && !conf
->mddev
->ro
&&
3132 test_bit(R5_ReadError
, &sh
->dev
[s
.failed_num
].flags
)
3133 && !test_bit(R5_LOCKED
, &sh
->dev
[s
.failed_num
].flags
)
3134 && test_bit(R5_UPTODATE
, &sh
->dev
[s
.failed_num
].flags
)
3136 dev
= &sh
->dev
[s
.failed_num
];
3137 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3138 set_bit(R5_Wantwrite
, &dev
->flags
);
3139 set_bit(R5_ReWrite
, &dev
->flags
);
3140 set_bit(R5_LOCKED
, &dev
->flags
);
3143 /* let's read it back */
3144 set_bit(R5_Wantread
, &dev
->flags
);
3145 set_bit(R5_LOCKED
, &dev
->flags
);
3150 /* Finish reconstruct operations initiated by the expansion process */
3151 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3152 struct stripe_head
*sh2
3153 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3154 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3155 /* sh cannot be written until sh2 has been read.
3156 * so arrange for sh to be delayed a little
3158 set_bit(STRIPE_DELAYED
, &sh
->state
);
3159 set_bit(STRIPE_HANDLE
, &sh
->state
);
3160 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3162 atomic_inc(&conf
->preread_active_stripes
);
3163 release_stripe(sh2
);
3167 release_stripe(sh2
);
3169 sh
->reconstruct_state
= reconstruct_state_idle
;
3170 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3171 for (i
= conf
->raid_disks
; i
--; ) {
3172 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3173 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3178 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3179 !sh
->reconstruct_state
) {
3180 /* Need to write out all blocks after computing parity */
3181 sh
->disks
= conf
->raid_disks
;
3182 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3183 schedule_reconstruction(sh
, &s
, 1, 1);
3184 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3185 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3186 atomic_dec(&conf
->reshape_stripes
);
3187 wake_up(&conf
->wait_for_overlap
);
3188 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3191 if (s
.expanding
&& s
.locked
== 0 &&
3192 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3193 handle_stripe_expansion(conf
, sh
, NULL
);
3196 spin_unlock(&sh
->lock
);
3198 /* wait for this device to become unblocked */
3199 if (unlikely(blocked_rdev
))
3200 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3203 raid_run_ops(sh
, s
.ops_request
);
3207 return_io(return_bi
);
3210 static void handle_stripe6(struct stripe_head
*sh
)
3212 raid5_conf_t
*conf
= sh
->raid_conf
;
3213 int disks
= sh
->disks
;
3214 struct bio
*return_bi
= NULL
;
3215 int i
, pd_idx
= sh
->pd_idx
, qd_idx
= sh
->qd_idx
;
3216 struct stripe_head_state s
;
3217 struct r6_state r6s
;
3218 struct r5dev
*dev
, *pdev
, *qdev
;
3219 mdk_rdev_t
*blocked_rdev
= NULL
;
3221 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3222 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3223 (unsigned long long)sh
->sector
, sh
->state
,
3224 atomic_read(&sh
->count
), pd_idx
, qd_idx
,
3225 sh
->check_state
, sh
->reconstruct_state
);
3226 memset(&s
, 0, sizeof(s
));
3228 spin_lock(&sh
->lock
);
3229 clear_bit(STRIPE_HANDLE
, &sh
->state
);
3230 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3232 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
3233 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3234 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3235 /* Now to look around and see what can be done */
3238 for (i
=disks
; i
--; ) {
3241 clear_bit(R5_Insync
, &dev
->flags
);
3243 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3244 i
, dev
->flags
, dev
->toread
, dev
->towrite
, dev
->written
);
3245 /* maybe we can reply to a read
3247 * new wantfill requests are only permitted while
3248 * ops_complete_biofill is guaranteed to be inactive
3250 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
3251 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
3252 set_bit(R5_Wantfill
, &dev
->flags
);
3254 /* now count some things */
3255 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
3256 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
3257 if (test_bit(R5_Wantcompute
, &dev
->flags
)) {
3259 BUG_ON(s
.compute
> 2);
3262 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
3264 } else if (dev
->toread
)
3268 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
3273 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3274 if (blocked_rdev
== NULL
&&
3275 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3276 blocked_rdev
= rdev
;
3277 atomic_inc(&rdev
->nr_pending
);
3279 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3280 /* The ReadError flag will just be confusing now */
3281 clear_bit(R5_ReadError
, &dev
->flags
);
3282 clear_bit(R5_ReWrite
, &dev
->flags
);
3284 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3285 || test_bit(R5_ReadError
, &dev
->flags
)) {
3287 r6s
.failed_num
[s
.failed
] = i
;
3290 set_bit(R5_Insync
, &dev
->flags
);
3294 if (unlikely(blocked_rdev
)) {
3295 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3296 s
.to_write
|| s
.written
) {
3297 set_bit(STRIPE_HANDLE
, &sh
->state
);
3300 /* There is nothing for the blocked_rdev to block */
3301 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
3302 blocked_rdev
= NULL
;
3305 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3306 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3307 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3310 pr_debug("locked=%d uptodate=%d to_read=%d"
3311 " to_write=%d failed=%d failed_num=%d,%d\n",
3312 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
, s
.failed
,
3313 r6s
.failed_num
[0], r6s
.failed_num
[1]);
3314 /* check if the array has lost >2 devices and, if so, some requests
3315 * might need to be failed
3317 if (s
.failed
> 2 && s
.to_read
+s
.to_write
+s
.written
)
3318 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3319 if (s
.failed
> 2 && s
.syncing
) {
3320 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3321 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3326 * might be able to return some write requests if the parity blocks
3327 * are safe, or on a failed drive
3329 pdev
= &sh
->dev
[pd_idx
];
3330 r6s
.p_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == pd_idx
)
3331 || (s
.failed
>= 2 && r6s
.failed_num
[1] == pd_idx
);
3332 qdev
= &sh
->dev
[qd_idx
];
3333 r6s
.q_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == qd_idx
)
3334 || (s
.failed
>= 2 && r6s
.failed_num
[1] == qd_idx
);
3337 ( r6s
.p_failed
|| ((test_bit(R5_Insync
, &pdev
->flags
)
3338 && !test_bit(R5_LOCKED
, &pdev
->flags
)
3339 && test_bit(R5_UPTODATE
, &pdev
->flags
)))) &&
3340 ( r6s
.q_failed
|| ((test_bit(R5_Insync
, &qdev
->flags
)
3341 && !test_bit(R5_LOCKED
, &qdev
->flags
)
3342 && test_bit(R5_UPTODATE
, &qdev
->flags
)))))
3343 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3345 /* Now we might consider reading some blocks, either to check/generate
3346 * parity, or to satisfy requests
3347 * or to load a block that is being partially written.
3349 if (s
.to_read
|| s
.non_overwrite
|| (s
.to_write
&& s
.failed
) ||
3350 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3351 handle_stripe_fill6(sh
, &s
, &r6s
, disks
);
3353 /* Now we check to see if any write operations have recently
3356 if (sh
->reconstruct_state
== reconstruct_state_drain_result
) {
3357 int qd_idx
= sh
->qd_idx
;
3359 sh
->reconstruct_state
= reconstruct_state_idle
;
3360 /* All the 'written' buffers and the parity blocks are ready to
3361 * be written back to disk
3363 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3364 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[qd_idx
].flags
));
3365 for (i
= disks
; i
--; ) {
3367 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3368 (i
== sh
->pd_idx
|| i
== qd_idx
||
3370 pr_debug("Writing block %d\n", i
);
3371 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
3372 set_bit(R5_Wantwrite
, &dev
->flags
);
3373 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3374 ((i
== sh
->pd_idx
|| i
== qd_idx
) &&
3376 set_bit(STRIPE_INSYNC
, &sh
->state
);
3379 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
3380 atomic_dec(&conf
->preread_active_stripes
);
3381 if (atomic_read(&conf
->preread_active_stripes
) <
3383 md_wakeup_thread(conf
->mddev
->thread
);
3387 /* Now to consider new write requests and what else, if anything
3388 * should be read. We do not handle new writes when:
3389 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
3390 * 2/ A 'check' operation is in flight, as it may clobber the parity
3393 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3394 handle_stripe_dirtying6(conf
, sh
, &s
, &r6s
, disks
);
3396 /* maybe we need to check and possibly fix the parity for this stripe
3397 * Any reads will already have been scheduled, so we just see if enough
3398 * data is available. The parity check is held off while parity
3399 * dependent operations are in flight.
3401 if (sh
->check_state
||
3402 (s
.syncing
&& s
.locked
== 0 &&
3403 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3404 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3405 handle_parity_checks6(conf
, sh
, &s
, &r6s
, disks
);
3407 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3408 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3409 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3412 /* If the failed drives are just a ReadError, then we might need
3413 * to progress the repair/check process
3415 if (s
.failed
<= 2 && !conf
->mddev
->ro
)
3416 for (i
= 0; i
< s
.failed
; i
++) {
3417 dev
= &sh
->dev
[r6s
.failed_num
[i
]];
3418 if (test_bit(R5_ReadError
, &dev
->flags
)
3419 && !test_bit(R5_LOCKED
, &dev
->flags
)
3420 && test_bit(R5_UPTODATE
, &dev
->flags
)
3422 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3423 set_bit(R5_Wantwrite
, &dev
->flags
);
3424 set_bit(R5_ReWrite
, &dev
->flags
);
3425 set_bit(R5_LOCKED
, &dev
->flags
);
3428 /* let's read it back */
3429 set_bit(R5_Wantread
, &dev
->flags
);
3430 set_bit(R5_LOCKED
, &dev
->flags
);
3436 /* Finish reconstruct operations initiated by the expansion process */
3437 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3438 sh
->reconstruct_state
= reconstruct_state_idle
;
3439 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3440 for (i
= conf
->raid_disks
; i
--; ) {
3441 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3442 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3447 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3448 !sh
->reconstruct_state
) {
3449 struct stripe_head
*sh2
3450 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3451 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3452 /* sh cannot be written until sh2 has been read.
3453 * so arrange for sh to be delayed a little
3455 set_bit(STRIPE_DELAYED
, &sh
->state
);
3456 set_bit(STRIPE_HANDLE
, &sh
->state
);
3457 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3459 atomic_inc(&conf
->preread_active_stripes
);
3460 release_stripe(sh2
);
3464 release_stripe(sh2
);
3466 /* Need to write out all blocks after computing P&Q */
3467 sh
->disks
= conf
->raid_disks
;
3468 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3469 schedule_reconstruction(sh
, &s
, 1, 1);
3470 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3471 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3472 atomic_dec(&conf
->reshape_stripes
);
3473 wake_up(&conf
->wait_for_overlap
);
3474 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3477 if (s
.expanding
&& s
.locked
== 0 &&
3478 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3479 handle_stripe_expansion(conf
, sh
, &r6s
);
3482 spin_unlock(&sh
->lock
);
3484 /* wait for this device to become unblocked */
3485 if (unlikely(blocked_rdev
))
3486 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3489 raid_run_ops(sh
, s
.ops_request
);
3493 return_io(return_bi
);
static void handle_stripe(struct stripe_head *sh)
{
	if (sh->raid_conf->level == 6)
		handle_stripe6(sh);
	else
		handle_stripe5(sh);
}
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	} else
		blk_plug_device(conf->mddev->queue);
}

static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
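/*
 * Illustrative note: IO_THRESHOLD is 1 (see the defines near the top of the
 * file), so delayed stripes are promoted onto hold_list only once the number
 * of preread-active stripes has drained below that threshold; otherwise the
 * queue is plugged again and the delayed work is retried on a later unplug.
 */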
static void unplug_slaves(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	int i;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	rcu_read_lock();
	for (i = 0; i < devs; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid5_unplug_device(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}

static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid5_conf_t *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (mddev_congested(mddev, bits))
		return 1;
	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
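/*
 * Illustrative note: the congestion callback never distinguishes reads from
 * writes; it reports congestion whenever the stripe cache looks saturated,
 * i.e. callers have recently been blocked waiting for a stripe
 * (inactive_blocked) or the inactive list is currently empty.
 */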
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return	chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
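/*
 * Worked example for the merge limit above (illustrative only): with
 * chunk_sectors == 128, a read whose offset within its chunk is 100 sectors
 * and which has accumulated no sectors yet gets
 * max = (128 - (100 + 0)) << 9, i.e. 28 sectors' worth of bytes, so the bvec
 * may only grow up to the chunk boundary.  Writes return biovec->bv_len
 * unconditionally since they go through the stripe cache anyway.
 */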
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}

static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 16 bits)
		 */
		bi->bi_phys_segments = 1; /* biased count of active stripes */
	}

	return bi;
}
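/*
 * Illustrative note, using the bi_phys_segments convention from the top of
 * the file: writing 1 here encodes "one active stripe, zero processed
 * stripes" (low half 1, upper half 0), so a retried aligned read restarts
 * its walk through the stripes from a clean state.
 */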
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;

	bio_put(bi);

	mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
	conf = mddev->private;
	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}

static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_phys_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}
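/*
 * Illustrative note: bio_fits_rdev() is a conservative filter for the cloned
 * bio used by aligned reads; anything that exceeds the member device's queue
 * limits (max sectors, max physical segments) or that would need a
 * merge_bvec_fn decision is rejected, and the caller falls back to the
 * normal stripe-cache path.
 */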
3736 static int chunk_aligned_read(struct request_queue
*q
, struct bio
* raid_bio
)
3738 mddev_t
*mddev
= q
->queuedata
;
3739 raid5_conf_t
*conf
= mddev
->private;
3740 unsigned int dd_idx
;
3741 struct bio
* align_bi
;
3744 if (!in_chunk_boundary(mddev
, raid_bio
)) {
3745 pr_debug("chunk_aligned_read : non aligned\n");
3749 * use bio_clone to make a copy of the bio
3751 align_bi
= bio_clone(raid_bio
, GFP_NOIO
);
3755 * set bi_end_io to a new function, and set bi_private to the
3758 align_bi
->bi_end_io
= raid5_align_endio
;
3759 align_bi
->bi_private
= raid_bio
;
3763 align_bi
->bi_sector
= raid5_compute_sector(conf
, raid_bio
->bi_sector
,
3768 rdev
= rcu_dereference(conf
->disks
[dd_idx
].rdev
);
3769 if (rdev
&& test_bit(In_sync
, &rdev
->flags
)) {
3770 atomic_inc(&rdev
->nr_pending
);
3772 raid_bio
->bi_next
= (void*)rdev
;
3773 align_bi
->bi_bdev
= rdev
->bdev
;
3774 align_bi
->bi_flags
&= ~(1 << BIO_SEG_VALID
);
3775 align_bi
->bi_sector
+= rdev
->data_offset
;
3777 if (!bio_fits_rdev(align_bi
)) {
3778 /* too big in some way */
3780 rdev_dec_pending(rdev
, mddev
);
3784 spin_lock_irq(&conf
->device_lock
);
3785 wait_event_lock_irq(conf
->wait_for_stripe
,
3787 conf
->device_lock
, /* nothing */);
3788 atomic_inc(&conf
->active_aligned_reads
);
3789 spin_unlock_irq(&conf
->device_lock
);
3791 generic_make_request(align_bi
);
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
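/*
 * Illustrative note: BYPASS_THRESHOLD defaults to 1 (see the defines at the
 * top of the file), so full-stripe writes on handle_list may bypass waiting
 * hold_list stripes only until bypass_count exceeds that threshold or there
 * are no pending full-stripe writes left, at which point the next stripe is
 * taken from hold_list instead.
 */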
3853 static int make_request(struct request_queue
*q
, struct bio
* bi
)
3855 mddev_t
*mddev
= q
->queuedata
;
3856 raid5_conf_t
*conf
= mddev
->private;
3858 sector_t new_sector
;
3859 sector_t logical_sector
, last_sector
;
3860 struct stripe_head
*sh
;
3861 const int rw
= bio_data_dir(bi
);
3864 if (unlikely(bio_rw_flagged(bi
, BIO_RW_BARRIER
))) {
3865 bio_endio(bi
, -EOPNOTSUPP
);
3869 md_write_start(mddev
, bi
);
3871 cpu
= part_stat_lock();
3872 part_stat_inc(cpu
, &mddev
->gendisk
->part0
, ios
[rw
]);
3873 part_stat_add(cpu
, &mddev
->gendisk
->part0
, sectors
[rw
],
3878 mddev
->reshape_position
== MaxSector
&&
3879 chunk_aligned_read(q
,bi
))
3882 logical_sector
= bi
->bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
3883 last_sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
3885 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
3887 for (;logical_sector
< last_sector
; logical_sector
+= STRIPE_SECTORS
) {
3889 int disks
, data_disks
;
3894 disks
= conf
->raid_disks
;
3895 prepare_to_wait(&conf
->wait_for_overlap
, &w
, TASK_UNINTERRUPTIBLE
);
3896 if (unlikely(conf
->reshape_progress
!= MaxSector
)) {
3897 /* spinlock is needed as reshape_progress may be
3898 * 64bit on a 32bit platform, and so it might be
3899 * possible to see a half-updated value
3900 * Ofcourse reshape_progress could change after
3901 * the lock is dropped, so once we get a reference
3902 * to the stripe that we think it is, we will have
3905 spin_lock_irq(&conf
->device_lock
);
3906 if (mddev
->delta_disks
< 0
3907 ? logical_sector
< conf
->reshape_progress
3908 : logical_sector
>= conf
->reshape_progress
) {
3909 disks
= conf
->previous_raid_disks
;
3912 if (mddev
->delta_disks
< 0
3913 ? logical_sector
< conf
->reshape_safe
3914 : logical_sector
>= conf
->reshape_safe
) {
3915 spin_unlock_irq(&conf
->device_lock
);
3920 spin_unlock_irq(&conf
->device_lock
);
3922 data_disks
= disks
- conf
->max_degraded
;
3924 new_sector
= raid5_compute_sector(conf
, logical_sector
,
3927 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3928 (unsigned long long)new_sector
,
3929 (unsigned long long)logical_sector
);
3931 sh
= get_active_stripe(conf
, new_sector
, previous
,
3932 (bi
->bi_rw
&RWA_MASK
), 0);
3934 if (unlikely(previous
)) {
3935 /* expansion might have moved on while waiting for a
3936 * stripe, so we must do the range check again.
3937 * Expansion could still move past after this
3938 * test, but as we are holding a reference to
3939 * 'sh', we know that if that happens,
3940 * STRIPE_EXPANDING will get set and the expansion
3941 * won't proceed until we finish with the stripe.
3944 spin_lock_irq(&conf
->device_lock
);
3945 if (mddev
->delta_disks
< 0
3946 ? logical_sector
>= conf
->reshape_progress
3947 : logical_sector
< conf
->reshape_progress
)
3948 /* mismatch, need to try again */
3950 spin_unlock_irq(&conf
->device_lock
);
3958 if (bio_data_dir(bi
) == WRITE
&&
3959 logical_sector
>= mddev
->suspend_lo
&&
3960 logical_sector
< mddev
->suspend_hi
) {
3962 /* As the suspend_* range is controlled by
3963 * userspace, we want an interruptible
3966 flush_signals(current
);
3967 prepare_to_wait(&conf
->wait_for_overlap
,
3968 &w
, TASK_INTERRUPTIBLE
);
3969 if (logical_sector
>= mddev
->suspend_lo
&&
3970 logical_sector
< mddev
->suspend_hi
)
3975 if (test_bit(STRIPE_EXPANDING
, &sh
->state
) ||
3976 !add_stripe_bio(sh
, bi
, dd_idx
, (bi
->bi_rw
&RW_MASK
))) {
3977 /* Stripe is busy expanding or
3978 * add failed due to overlap. Flush everything
3981 raid5_unplug_device(mddev
->queue
);
3986 finish_wait(&conf
->wait_for_overlap
, &w
);
3987 set_bit(STRIPE_HANDLE
, &sh
->state
);
3988 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3991 /* cannot get stripe for read-ahead, just give-up */
3992 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
3993 finish_wait(&conf
->wait_for_overlap
, &w
);
3998 spin_lock_irq(&conf
->device_lock
);
3999 remaining
= raid5_dec_bi_phys_segments(bi
);
4000 spin_unlock_irq(&conf
->device_lock
);
4001 if (remaining
== 0) {
4004 md_write_end(mddev
);
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr,
				int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->delta_disks < 0 &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (mddev->delta_disks >= 0 &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			*skipped = 1;
			return sector_nr;
		}
	}

	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these.
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}

	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 * been reshaped.
	 * If 'readpos' is behind 'writepos', then there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
	 */
	if ((mddev->delta_disks < 0
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = mddev->curr_resync;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}

	if (mddev->delta_disks < 0) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}
	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j = sh->disks; j--; ) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->delta_disks < 0)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += reshape_sectors;
	if ((sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	return reshape_sectors;
}
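
/* Note (added): sync_request() below is the entry point used by md_do_sync()
 * for resync, recovery and reshape.  When MD_RECOVERY_RESHAPE is set it hands
 * the work to reshape_request() above; otherwise it processes a single stripe
 * (STRIPE_SECTORS) per call.
 */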
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	int sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {

		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */
			continue;

		sh = get_active_stripe(conf, sector, 0, 1, 0);

		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		handle_stripe(sh);
		release_stripe(sh);
		handled++;
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev->private;
	int handled;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;

		if (conf->seq_flush != conf->seq_write) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		sh = __get_priority_stripe(conf);

		if (!sh)
			break;
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	unplug_slaves(mddev);

	pr_debug("--- raid5d inactive\n");
}
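
/* Note (added): the sysfs attributes below tune the stripe cache.
 * stripe_cache_size grows or shrinks the cache one stripe_head at a time, so
 * a store may stop early if memory is tight; reading it back shows what was
 * actually achieved.  preread_bypass_threshold sets conf->bypass_threshold,
 * which the stripe scheduling (__get_priority_stripe()) uses to decide how
 * often full-stripe writes may bypass stripes delayed waiting for pre-reads.
 */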
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);

static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->bypass_threshold);
	else
		return 0;
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] =  {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	raid5_conf_t *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
static void raid5_free_percpu(raid5_conf_t *conf)
{
	struct raid5_percpu *percpu;
	unsigned long cpu;

	if (!conf->percpu)
		return;

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		percpu = per_cpu_ptr(conf->percpu, cpu);
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
	}
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	free_percpu(conf->percpu);
}

static void free_conf(raid5_conf_t *conf)
{
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->disks);
	kfree(conf->stripe_hashtbl);
	kfree(conf);
}
#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
	long cpu = (long)hcpu;
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (conf->level == 6 && !percpu->spare_page)
			percpu->spare_page = alloc_page(GFP_KERNEL);
		if (!percpu->scribble)
			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

		if (!percpu->scribble ||
		    (conf->level == 6 && !percpu->spare_page)) {
			safe_put_page(percpu->spare_page);
			kfree(percpu->scribble);
			pr_err("%s: failed memory allocation for cpu%ld\n",
			       __func__, cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
		percpu->spare_page = NULL;
		percpu->scribble = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif

static int raid5_alloc_percpu(raid5_conf_t *conf)
{
	unsigned long cpu;
	struct page *spare_page;
	struct raid5_percpu *allcpus;
	void *scribble;
	int err;

	allcpus = alloc_percpu(struct raid5_percpu);
	if (!allcpus)
		return -ENOMEM;
	conf->percpu = allcpus;

	get_online_cpus();
	err = 0;
	for_each_present_cpu(cpu) {
		if (conf->level == 6) {
			spare_page = alloc_page(GFP_KERNEL);
			if (!spare_page) {
				err = -ENOMEM;
				break;
			}
			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
		}
		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
		if (!scribble) {
			err = -ENOMEM;
			break;
		}
		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
	}
#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	if (err == 0)
		err = register_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	return err;
}
static raid5_conf_t *setup_conf(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory, max_disks;
	mdk_rdev_t *rdev;
	struct disk_info *disk;

	if (mddev->new_level != 5
	    && mddev->new_level != 4
	    && mddev->new_level != 6) {
		printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->new_level);
		return ERR_PTR(-EIO);
	}
	if ((mddev->new_level == 5
	     && !algorithm_valid_raid5(mddev->new_layout)) ||
	    (mddev->new_level == 6
	     && !algorithm_valid_raid6(mddev->new_layout))) {
		printk(KERN_ERR "raid5: %s: layout %d not supported\n",
		       mdname(mddev), mddev->new_layout);
		return ERR_PTR(-EIO);
	}
	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
		       mdname(mddev), mddev->raid_disks);
		return ERR_PTR(-EINVAL);
	}

	if (!mddev->new_chunk_sectors ||
	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
		       mddev->new_chunk_sectors << 9, mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
	if (conf == NULL)
		goto abort;
	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;

	conf->raid_disks = mddev->raid_disks;
	if (mddev->reshape_position == MaxSector)
		conf->previous_raid_disks = mddev->raid_disks;
	else
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
	conf->scribble_len = scribble_len(max_disks);

	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	conf->level = mddev->new_level;
	if (raid5_alloc_percpu(conf) != 0)
		goto abort;

	pr_debug("raid5: run(%s) called.\n", mdname(mddev));

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= max_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
			       " disk %d\n", bdevname(rdev->bdev, b),
			       raid_disk);
		} else
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}

	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->level = mddev->new_level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->new_layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->reshape_progress = mddev->reshape_position;
	if (conf->reshape_progress != MaxSector) {
		conf->prev_chunk_sectors = mddev->chunk_sectors;
		conf->prev_algo = mddev->layout;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "raid5: couldn't allocate %dkB for buffers\n", memory);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
		       memory, mdname(mddev));

	conf->thread = md_register_thread(raid5d, mddev, NULL);
	if (!conf->thread) {
		printk(KERN_ERR
		       "raid5: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		free_conf(conf);
		return ERR_PTR(-EIO);
	} else
		return ERR_PTR(-ENOMEM);
}
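
/* Note (added): only_parity() reports whether the device in slot 'raid_disk'
 * holds nothing but parity blocks for the given layout.  run() below uses it
 * so that an out-of-date device that only ever stored parity does not force
 * the array to be treated as dirty.
 */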
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
	}
	return 0;
}
static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int working_disks = 0, chunk_size;
	int dirty_parity_disks = 0;
	mdk_rdev_t *rdev;
	sector_t reshape_offset = 0;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "raid5: %s is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Currently only disks can change, it must
		 * increase, and we must be past the point where
		 * a stripe over-writes itself
		 */
		sector_t here_new, here_old;
		int old_disks;
		int max_degraded = (mddev->level == 6 ? 2 : 1);

		if (mddev->new_level != mddev->level) {
			printk(KERN_ERR "raid5: %s: unsupported reshape "
			       "required - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 */
		here_new = mddev->reshape_position;
		if (sector_div(here_new, mddev->new_chunk_sectors *
			       (mddev->raid_disks - max_degraded))) {
			printk(KERN_ERR "raid5: reshape_position not "
			       "on a stripe boundary\n");
			return -EINVAL;
		}
		reshape_offset = here_new * mddev->new_chunk_sectors;
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, mddev->chunk_sectors *
			   (old_disks-max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
		if (mddev->delta_disks == 0) {
			/* We cannot be sure it is safe to start an in-place
			 * reshape.  It is only safe if user-space is monitoring
			 * and taking constant backups.
			 * mdadm always starts a situation like this in
			 * readonly mode so it can take control before
			 * allowing any writes.  So just check for that.
			 */
			if ((here_new * mddev->new_chunk_sectors !=
			     here_old * mddev->chunk_sectors) ||
			    mddev->ro == 0) {
				printk(KERN_ERR "raid5: in-place reshape must be started"
				       " in read-only mode - aborting\n");
				return -EINVAL;
			}
		} else if (mddev->delta_disks < 0
		    ? (here_new * mddev->new_chunk_sectors <=
		       here_old * mddev->chunk_sectors)
		    : (here_new * mddev->new_chunk_sectors >=
		       here_old * mddev->chunk_sectors)) {
			/* Reading from the same stripe as writing to - bad */
			printk(KERN_ERR "raid5: reshape_position too early for "
			       "auto-recovery - aborting.\n");
			return -EINVAL;
		}
		printk(KERN_INFO "raid5: reshape will continue\n");
		/* OK, we should be able to continue; */
	} else {
		BUG_ON(mddev->level != mddev->new_level);
		BUG_ON(mddev->layout != mddev->new_layout);
		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
		BUG_ON(mddev->delta_disks != 0);
	}

	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk < 0)
			continue;
		if (test_bit(In_sync, &rdev->flags))
			working_disks++;
		/* This disc is not fully in-sync.  However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
		/* Hack because v0.91 doesn't store recovery_offset properly. */
		if (mddev->major_version == 0 &&
		    mddev->minor_version > 90)
			rdev->recovery_offset = reshape_offset;

		printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
		       rdev->raid_disk, working_disks, conf->prev_algo,
		       conf->previous_raid_disks, conf->max_degraded,
		       conf->algorithm, conf->raid_disks,
		       only_parity(rdev->raid_disk,
				   conf->prev_algo,
				   conf->previous_raid_disks,
				   conf->max_degraded),
		       only_parity(rdev->raid_disk,
				   conf->algorithm,
				   conf->raid_disks,
				   conf->max_degraded));
		if (rdev->recovery_offset < reshape_offset) {
			/* We need to check old and new layout */
			if (!only_parity(rdev->raid_disk,
					 conf->algorithm,
					 conf->raid_disks,
					 conf->max_degraded))
				continue;
		}
		if (!only_parity(rdev->raid_disk,
				 conf->prev_algo,
				 conf->previous_raid_disks,
				 conf->max_degraded))
			continue;
		dirty_parity_disks++;
	}

	mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
			   - working_disks);

	if (mddev->degraded > conf->max_degraded) {
		printk(KERN_ERR "raid5: not enough operational devices for %s"
		       " (%d/%d failed)\n",
		       mdname(mddev), mddev->degraded, conf->raid_disks);
		goto abort;
	}

	/* device size must be a multiple of chunk size */
	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
	mddev->resync_max_sectors = mddev->dev_sectors;

	if (mddev->degraded > dirty_parity_disks &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "raid5: starting dirty degraded array: %s"
			       "- data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "raid5: cannot start dirty degraded array for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}

	if (mddev->degraded == 0)
		printk("raid5: raid level %d set %s active with %d out of %d"
		       " devices, algorithm %d\n", conf->level, mdname(mddev),
		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
		       mddev->new_layout);
	else
		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
		       " out of %d devices, algorithm %d\n", conf->level,
		       mdname(mddev), mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, mddev->new_layout);

	print_raid5_conf(conf);

	if (conf->reshape_progress != MaxSector) {
		printk("...ok start reshape thread\n");
		conf->reshape_safe = conf->reshape_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"reshape");
	}

	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
	 */
	{
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	/* Ok, everything is just fine now */
	if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));

	mddev->queue->queue_lock = &conf->device_lock;

	mddev->queue->unplug_fn = raid5_unplug_device;
	mddev->queue->backing_dev_info.congested_data = mddev;
	mddev->queue->backing_dev_info.congested_fn = raid5_congested;

	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

	blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
	chunk_size = mddev->chunk_sectors << 9;
	blk_queue_io_min(mddev->queue, chunk_size);
	blk_queue_io_opt(mddev->queue, chunk_size *
			 (conf->raid_disks - conf->max_degraded));

	list_for_each_entry(rdev, &mddev->disks, same_set)
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

	return 0;
abort:
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	if (conf) {
		print_raid5_conf(conf);
		free_conf(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
	return -EIO;
}
static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	mddev->queue->backing_dev_info.congested_fn = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
	free_conf(conf);
	mddev->private = NULL;
	return 0;
}
#ifdef DEBUG
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
	int i;

	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
	seq_printf(seq, "sh %llu,  count %d.\n",
		   (unsigned long long)sh->sector, atomic_read(&sh->count));
	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->disks; i++) {
		seq_printf(seq, "(cache%d: %p %ld) ",
			   i, sh->dev[i].page, sh->dev[i].flags);
	}
	seq_printf(seq, "\n");
}

static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
				continue;
			print_sh(seq, sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}
#endif

static void status(struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_sectors / 2, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
#ifdef DEBUG
	seq_printf(seq, "\n");
	printall(seq, conf);
#endif
}
static void print_raid5_conf (raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;

	printk("RAID5 conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- rd:%d wd:%d\n", conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(" disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}
	print_raid5_conf(conf);
	return 0;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (number >= conf->raid_disks &&
		    conf->reshape_progress == MaxSector)
			clear_bit(In_sync, &rdev->flags);

		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * isn't possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->degraded <= conf->max_degraded &&
		    number < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_raid5_conf(conf);
	return err;
}
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->degraded > conf->max_degraded)
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = first;
	for ( ; disk <= last; disk++)
		if ((p=conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return err;
}
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	md_set_array_sectors(mddev, raid5_size(mddev, sectors,
					       mddev->raid_disks));
	if (mddev->array_sectors >
	    raid5_size(mddev, sectors, mddev->raid_disks))
		return -EINVAL;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int check_stripe_cache(mddev_t *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	raid5_conf_t *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE)*4);
		return 0;
	}
	return 1;
}
static int check_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;

	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (mddev->bitmap)
		/* Cannot grow a bitmap yet */
		return -EBUSY;
	if (mddev->degraded > conf->max_degraded)
		return -EINVAL;
	if (mddev->delta_disks < 0) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}
static int raid5_start_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	mdk_rdev_t *rdev;
	int spares = 0;
	int added_devices = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;

	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md: %s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));
		return -EINVAL;
	}

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	if (mddev->delta_disks < 0)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			if (raid5_add_disk(mddev, rdev) == 0) {
				char nm[20];
				if (rdev->raid_disk >= conf->previous_raid_disks) {
					set_bit(In_sync, &rdev->flags);
					added_devices++;
				} else
					rdev->recovery_offset = 0;
				sprintf(nm, "rd%d", rdev->raid_disk);
				if (sysfs_create_link(&mddev->kobj,
						      &rdev->kobj, nm))
					printk(KERN_WARNING
					       "raid5: failed to create "
					       " link %s for %s\n",
					       nm, mdname(mddev));
			} else
				break;
		}

	/* When a reshape changes the number of devices, ->degraded
	 * is measured against the larger of the pre and post number of
	 * devices.
	 */
	if (mddev->delta_disks > 0) {
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
			- added_devices;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(raid5_conf_t *conf)
{

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
		 */
		{
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			mddev->degraded = conf->raid_disks;
			for (d = 0; d < conf->raid_disks ; d++)
				if (conf->disks[d].rdev &&
				    test_bit(In_sync,
					     &conf->disks[d].rdev->flags))
					mddev->degraded--;
			for (d = conf->raid_disks ;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				mdk_rdev_t *rdev = conf->disks[d].rdev;
				if (rdev && raid5_remove_disk(mddev, d) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
				}
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
	}
}
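
/* Note (added): raid5_quiesce(): state 1 stops all writes and state 0
 * re-enables them; state 2 briefly wakes waiters so a suspended
 * resync/reshape can resume.  While draining, conf->quiesce is set to 2,
 * which is the value sync_request() waits on (conf->quiesce != 2) so that
 * resync/reshape pauses until all active stripes and aligned reads have
 * completed.
 */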
static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev->private;

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
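
/* Note (added): takeover from raid1 only accepts a 2-device raid1 with at
 * most one degraded member.  The chunk size is chosen as the largest power
 * of two (up to 64K) that divides the array size; if even STRIPE_SIZE does
 * not fit, the conversion is refused.
 */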
static void *raid5_takeover_raid1(mddev_t *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
static void *raid5_takeover_raid6(mddev_t *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
static int raid5_check_reshape(mddev_t *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	raid5_conf_t *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}

static int raid6_check_reshape(mddev_t *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}
static void *raid5_takeover(mddev_t *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if all devices are the same - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */

	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}
static struct mdk_personality raid5_personality;

static void *raid6_takeover(mddev_t *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
};
static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");