/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *    we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *    batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 *    miss any bits.
 */
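/*
 * A minimal illustrative sketch (not part of the driver): the batch numbers
 * described above are free-running counters, so "has this stripe's batch been
 * written out yet?" is answered with a signed difference rather than a direct
 * compare, mirroring the "sh->bm_seq - conf->seq_write > 0" test used later in
 * this file.  The helper name below is hypothetical.
 */
static inline int example_bitmap_batch_pending(int stripe_bm_seq, int bm_write)
{
	/* non-zero while the stripe's batch is still ahead of what was written */
	return stripe_bm_seq - bm_write > 0;
}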
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
/* local md headers providing raid5_conf_t, mddev_t and the bitmap helpers */
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
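/*
 * A minimal illustrative sketch (not part of the driver): a stripe's hash
 * bucket is derived from its starting sector.  Shifting by STRIPE_SHIFT turns
 * a 512-byte sector number into a stripe (page) number, and HASH_MASK folds it
 * into the NR_HASH-entry table.  The helper name below is hypothetical.
 */
static inline unsigned long example_stripe_bucket(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & HASH_MASK;
}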
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device.
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
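/*
 * A minimal illustrative sketch (not part of the driver): walking the bio
 * chain attached to one stripe+device with r5_next_bio(), stopping once a bio
 * extends past this stripe.  This is the same loop shape used by the biofill,
 * biodrain and failure paths below.  The helper name is hypothetical.
 */
static inline int example_count_stripe_bios(struct bio *bi, sector_t dev_sector)
{
	int n = 0;

	while (bi && bi->bi_sector < dev_sector + STRIPE_SECTORS) {
		n++;
		bi = r5_next_bio(bi, dev_sector);
	}
	return n;
}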
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits.
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	/* combine with bitwise OR; a logical || here would collapse the count */
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
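/*
 * A minimal illustrative sketch (not part of the driver): the helpers above
 * pack two 16-bit counters into the single 32-bit bi_phys_segments field,
 * active stripe references in the low half and processed stripes in the high
 * half.  The helper name below is hypothetical.
 */
static inline unsigned int example_pack_segment_counts(unsigned int active,
							unsigned int processed)
{
	return (active & 0xffff) | ((processed & 0xffff) << 16);
}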
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
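/*
 * A minimal illustrative sketch (not part of the driver): filling a
 * disk-index-to-slot table by walking the stripe from raid6_d0(), the same
 * traversal set_syndrome_sources() performs below.  For the md layout the data
 * blocks land in slots 0..syndrome_disks-1, P in slot syndrome_disks and Q in
 * slot syndrome_disks+1.  The helper name is hypothetical.
 */
static inline void example_fill_slot_map(struct stripe_head *sh, int *slot_of)
{
	int syndrome_disks = sh->ddf_layout ? sh->disks : sh->disks - 2;
	int d0_idx = raid6_d0(sh);
	int count = 0;
	int i = d0_idx;

	do {
		slot_of[i] = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
		i = raid6_next_disk(i, sh->disks);
	} while (i != d0_idx);
}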
172 static void return_io(struct bio
*return_bi
)
174 struct bio
*bi
= return_bi
;
177 return_bi
= bi
->bi_next
;
185 static void print_raid5_conf (raid5_conf_t
*conf
);
187 static int stripe_operations_active(struct stripe_head
*sh
)
189 return sh
->check_state
|| sh
->reconstruct_state
||
190 test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
) ||
191 test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
194 static void __release_stripe(raid5_conf_t
*conf
, struct stripe_head
*sh
)
196 if (atomic_dec_and_test(&sh
->count
)) {
197 BUG_ON(!list_empty(&sh
->lru
));
198 BUG_ON(atomic_read(&conf
->active_stripes
)==0);
199 if (test_bit(STRIPE_HANDLE
, &sh
->state
)) {
200 if (test_bit(STRIPE_DELAYED
, &sh
->state
)) {
201 list_add_tail(&sh
->lru
, &conf
->delayed_list
);
202 blk_plug_device(conf
->mddev
->queue
);
203 } else if (test_bit(STRIPE_BIT_DELAY
, &sh
->state
) &&
204 sh
->bm_seq
- conf
->seq_write
> 0) {
205 list_add_tail(&sh
->lru
, &conf
->bitmap_list
);
206 blk_plug_device(conf
->mddev
->queue
);
208 clear_bit(STRIPE_BIT_DELAY
, &sh
->state
);
209 list_add_tail(&sh
->lru
, &conf
->handle_list
);
211 md_wakeup_thread(conf
->mddev
->thread
);
213 BUG_ON(stripe_operations_active(sh
));
214 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
215 atomic_dec(&conf
->preread_active_stripes
);
216 if (atomic_read(&conf
->preread_active_stripes
) < IO_THRESHOLD
)
217 md_wakeup_thread(conf
->mddev
->thread
);
219 atomic_dec(&conf
->active_stripes
);
220 if (!test_bit(STRIPE_EXPANDING
, &sh
->state
)) {
221 list_add_tail(&sh
->lru
, &conf
->inactive_list
);
222 wake_up(&conf
->wait_for_stripe
);
223 if (conf
->retry_read_aligned
)
224 md_wakeup_thread(conf
->mddev
->thread
);
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
278 static void shrink_buffers(struct stripe_head
*sh
, int num
)
283 for (i
=0; i
<num
; i
++) {
287 sh
->dev
[i
].page
= NULL
;
292 static int grow_buffers(struct stripe_head
*sh
, int num
)
296 for (i
=0; i
<num
; i
++) {
299 if (!(page
= alloc_page(GFP_KERNEL
))) {
302 sh
->dev
[i
].page
= page
;
307 static void raid5_build_block(struct stripe_head
*sh
, int i
, int previous
);
308 static void stripe_set_idx(sector_t stripe
, raid5_conf_t
*conf
, int previous
,
309 struct stripe_head
*sh
);
311 static void init_stripe(struct stripe_head
*sh
, sector_t sector
, int previous
)
313 raid5_conf_t
*conf
= sh
->raid_conf
;
316 BUG_ON(atomic_read(&sh
->count
) != 0);
317 BUG_ON(test_bit(STRIPE_HANDLE
, &sh
->state
));
318 BUG_ON(stripe_operations_active(sh
));
321 pr_debug("init_stripe called, stripe %llu\n",
322 (unsigned long long)sh
->sector
);
326 sh
->generation
= conf
->generation
- previous
;
327 sh
->disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
329 stripe_set_idx(sector
, conf
, previous
, sh
);
333 for (i
= sh
->disks
; i
--; ) {
334 struct r5dev
*dev
= &sh
->dev
[i
];
336 if (dev
->toread
|| dev
->read
|| dev
->towrite
|| dev
->written
||
337 test_bit(R5_LOCKED
, &dev
->flags
)) {
338 printk(KERN_ERR
"sector=%llx i=%d %p %p %p %p %d\n",
339 (unsigned long long)sh
->sector
, i
, dev
->toread
,
340 dev
->read
, dev
->towrite
, dev
->written
,
341 test_bit(R5_LOCKED
, &dev
->flags
));
345 raid5_build_block(sh
, i
, previous
);
347 insert_hash(conf
, sh
);
350 static struct stripe_head
*__find_stripe(raid5_conf_t
*conf
, sector_t sector
,
353 struct stripe_head
*sh
;
354 struct hlist_node
*hn
;
357 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector
);
358 hlist_for_each_entry(sh
, hn
, stripe_hash(conf
, sector
), hash
)
359 if (sh
->sector
== sector
&& sh
->generation
== generation
)
361 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector
);
365 static void unplug_slaves(mddev_t
*mddev
);
366 static void raid5_unplug_device(struct request_queue
*q
);
368 static struct stripe_head
*
369 get_active_stripe(raid5_conf_t
*conf
, sector_t sector
,
370 int previous
, int noblock
, int noquiesce
)
372 struct stripe_head
*sh
;
374 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector
);
376 spin_lock_irq(&conf
->device_lock
);
379 wait_event_lock_irq(conf
->wait_for_stripe
,
380 conf
->quiesce
== 0 || noquiesce
,
381 conf
->device_lock
, /* nothing */);
382 sh
= __find_stripe(conf
, sector
, conf
->generation
- previous
);
384 if (!conf
->inactive_blocked
)
385 sh
= get_free_stripe(conf
);
386 if (noblock
&& sh
== NULL
)
389 conf
->inactive_blocked
= 1;
390 wait_event_lock_irq(conf
->wait_for_stripe
,
391 !list_empty(&conf
->inactive_list
) &&
392 (atomic_read(&conf
->active_stripes
)
393 < (conf
->max_nr_stripes
*3/4)
394 || !conf
->inactive_blocked
),
396 raid5_unplug_device(conf
->mddev
->queue
)
398 conf
->inactive_blocked
= 0;
400 init_stripe(sh
, sector
, previous
);
402 if (atomic_read(&sh
->count
)) {
403 BUG_ON(!list_empty(&sh
->lru
)
404 && !test_bit(STRIPE_EXPANDING
, &sh
->state
));
406 if (!test_bit(STRIPE_HANDLE
, &sh
->state
))
407 atomic_inc(&conf
->active_stripes
);
408 if (list_empty(&sh
->lru
) &&
409 !test_bit(STRIPE_EXPANDING
, &sh
->state
))
411 list_del_init(&sh
->lru
);
414 } while (sh
== NULL
);
417 atomic_inc(&sh
->count
);
419 spin_unlock_irq(&conf
->device_lock
);
424 raid5_end_read_request(struct bio
*bi
, int error
);
426 raid5_end_write_request(struct bio
*bi
, int error
);
428 static void ops_run_io(struct stripe_head
*sh
, struct stripe_head_state
*s
)
430 raid5_conf_t
*conf
= sh
->raid_conf
;
431 int i
, disks
= sh
->disks
;
435 for (i
= disks
; i
--; ) {
439 if (test_and_clear_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
))
441 else if (test_and_clear_bit(R5_Wantread
, &sh
->dev
[i
].flags
))
446 bi
= &sh
->dev
[i
].req
;
450 bi
->bi_end_io
= raid5_end_write_request
;
452 bi
->bi_end_io
= raid5_end_read_request
;
455 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
456 if (rdev
&& test_bit(Faulty
, &rdev
->flags
))
459 atomic_inc(&rdev
->nr_pending
);
463 if (s
->syncing
|| s
->expanding
|| s
->expanded
)
464 md_sync_acct(rdev
->bdev
, STRIPE_SECTORS
);
466 set_bit(STRIPE_IO_STARTED
, &sh
->state
);
468 bi
->bi_bdev
= rdev
->bdev
;
469 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
470 __func__
, (unsigned long long)sh
->sector
,
472 atomic_inc(&sh
->count
);
473 bi
->bi_sector
= sh
->sector
+ rdev
->data_offset
;
474 bi
->bi_flags
= 1 << BIO_UPTODATE
;
478 bi
->bi_io_vec
= &sh
->dev
[i
].vec
;
479 bi
->bi_io_vec
[0].bv_len
= STRIPE_SIZE
;
480 bi
->bi_io_vec
[0].bv_offset
= 0;
481 bi
->bi_size
= STRIPE_SIZE
;
484 test_bit(R5_ReWrite
, &sh
->dev
[i
].flags
))
485 atomic_add(STRIPE_SECTORS
,
486 &rdev
->corrected_errors
);
487 generic_make_request(bi
);
490 set_bit(STRIPE_DEGRADED
, &sh
->state
);
491 pr_debug("skip op %ld on disc %d for sector %llu\n",
492 bi
->bi_rw
, i
, (unsigned long long)sh
->sector
);
493 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
494 set_bit(STRIPE_HANDLE
, &sh
->state
);
499 static struct dma_async_tx_descriptor
*
500 async_copy_data(int frombio
, struct bio
*bio
, struct page
*page
,
501 sector_t sector
, struct dma_async_tx_descriptor
*tx
)
504 struct page
*bio_page
;
507 struct async_submit_ctl submit
;
508 enum async_tx_flags flags
= 0;
510 if (bio
->bi_sector
>= sector
)
511 page_offset
= (signed)(bio
->bi_sector
- sector
) * 512;
513 page_offset
= (signed)(sector
- bio
->bi_sector
) * -512;
516 flags
|= ASYNC_TX_FENCE
;
517 init_async_submit(&submit
, flags
, tx
, NULL
, NULL
, NULL
);
519 bio_for_each_segment(bvl
, bio
, i
) {
520 int len
= bio_iovec_idx(bio
, i
)->bv_len
;
524 if (page_offset
< 0) {
525 b_offset
= -page_offset
;
526 page_offset
+= b_offset
;
530 if (len
> 0 && page_offset
+ len
> STRIPE_SIZE
)
531 clen
= STRIPE_SIZE
- page_offset
;
536 b_offset
+= bio_iovec_idx(bio
, i
)->bv_offset
;
537 bio_page
= bio_iovec_idx(bio
, i
)->bv_page
;
539 tx
= async_memcpy(page
, bio_page
, page_offset
,
540 b_offset
, clen
, &submit
);
542 tx
= async_memcpy(bio_page
, page
, b_offset
,
543 page_offset
, clen
, &submit
);
545 /* chain the operations */
546 submit
.depend_tx
= tx
;
548 if (clen
< len
) /* hit end of page */
556 static void ops_complete_biofill(void *stripe_head_ref
)
558 struct stripe_head
*sh
= stripe_head_ref
;
559 struct bio
*return_bi
= NULL
;
560 raid5_conf_t
*conf
= sh
->raid_conf
;
563 pr_debug("%s: stripe %llu\n", __func__
,
564 (unsigned long long)sh
->sector
);
566 /* clear completed biofills */
567 spin_lock_irq(&conf
->device_lock
);
568 for (i
= sh
->disks
; i
--; ) {
569 struct r5dev
*dev
= &sh
->dev
[i
];
571 /* acknowledge completion of a biofill operation */
572 /* and check if we need to reply to a read request,
573 * new R5_Wantfill requests are held off until
574 * !STRIPE_BIOFILL_RUN
576 if (test_and_clear_bit(R5_Wantfill
, &dev
->flags
)) {
577 struct bio
*rbi
, *rbi2
;
582 while (rbi
&& rbi
->bi_sector
<
583 dev
->sector
+ STRIPE_SECTORS
) {
584 rbi2
= r5_next_bio(rbi
, dev
->sector
);
585 if (!raid5_dec_bi_phys_segments(rbi
)) {
586 rbi
->bi_next
= return_bi
;
593 spin_unlock_irq(&conf
->device_lock
);
594 clear_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
596 return_io(return_bi
);
598 set_bit(STRIPE_HANDLE
, &sh
->state
);
602 static void ops_run_biofill(struct stripe_head
*sh
)
604 struct dma_async_tx_descriptor
*tx
= NULL
;
605 raid5_conf_t
*conf
= sh
->raid_conf
;
606 struct async_submit_ctl submit
;
609 pr_debug("%s: stripe %llu\n", __func__
,
610 (unsigned long long)sh
->sector
);
612 for (i
= sh
->disks
; i
--; ) {
613 struct r5dev
*dev
= &sh
->dev
[i
];
614 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
616 spin_lock_irq(&conf
->device_lock
);
617 dev
->read
= rbi
= dev
->toread
;
619 spin_unlock_irq(&conf
->device_lock
);
620 while (rbi
&& rbi
->bi_sector
<
621 dev
->sector
+ STRIPE_SECTORS
) {
622 tx
= async_copy_data(0, rbi
, dev
->page
,
624 rbi
= r5_next_bio(rbi
, dev
->sector
);
629 atomic_inc(&sh
->count
);
630 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_biofill
, sh
, NULL
);
631 async_trigger_callback(&submit
);
634 static void mark_target_uptodate(struct stripe_head
*sh
, int target
)
641 tgt
= &sh
->dev
[target
];
642 set_bit(R5_UPTODATE
, &tgt
->flags
);
643 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
644 clear_bit(R5_Wantcompute
, &tgt
->flags
);
647 static void ops_complete_compute(void *stripe_head_ref
)
649 struct stripe_head
*sh
= stripe_head_ref
;
651 pr_debug("%s: stripe %llu\n", __func__
,
652 (unsigned long long)sh
->sector
);
654 /* mark the computed target(s) as uptodate */
655 mark_target_uptodate(sh
, sh
->ops
.target
);
656 mark_target_uptodate(sh
, sh
->ops
.target2
);
658 clear_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
659 if (sh
->check_state
== check_state_compute_run
)
660 sh
->check_state
= check_state_compute_result
;
661 set_bit(STRIPE_HANDLE
, &sh
->state
);
665 /* return a pointer to the address conversion region of the scribble buffer */
666 static addr_conv_t
*to_addr_conv(struct stripe_head
*sh
,
667 struct raid5_percpu
*percpu
)
669 return percpu
->scribble
+ sizeof(struct page
*) * (sh
->disks
+ 2);
672 static struct dma_async_tx_descriptor
*
673 ops_run_compute5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
675 int disks
= sh
->disks
;
676 struct page
**xor_srcs
= percpu
->scribble
;
677 int target
= sh
->ops
.target
;
678 struct r5dev
*tgt
= &sh
->dev
[target
];
679 struct page
*xor_dest
= tgt
->page
;
681 struct dma_async_tx_descriptor
*tx
;
682 struct async_submit_ctl submit
;
685 pr_debug("%s: stripe %llu block: %d\n",
686 __func__
, (unsigned long long)sh
->sector
, target
);
687 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
689 for (i
= disks
; i
--; )
691 xor_srcs
[count
++] = sh
->dev
[i
].page
;
693 atomic_inc(&sh
->count
);
695 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
, NULL
,
696 ops_complete_compute
, sh
, to_addr_conv(sh
, percpu
));
697 if (unlikely(count
== 1))
698 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
700 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
/**
 * set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
714 static int set_syndrome_sources(struct page
**srcs
, struct stripe_head
*sh
)
716 int disks
= sh
->disks
;
717 int syndrome_disks
= sh
->ddf_layout
? disks
: (disks
- 2);
718 int d0_idx
= raid6_d0(sh
);
722 for (i
= 0; i
< disks
; i
++)
728 int slot
= raid6_idx_to_slot(i
, sh
, &count
, syndrome_disks
);
730 srcs
[slot
] = sh
->dev
[i
].page
;
731 i
= raid6_next_disk(i
, disks
);
732 } while (i
!= d0_idx
);
734 return syndrome_disks
;
737 static struct dma_async_tx_descriptor
*
738 ops_run_compute6_1(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
740 int disks
= sh
->disks
;
741 struct page
**blocks
= percpu
->scribble
;
743 int qd_idx
= sh
->qd_idx
;
744 struct dma_async_tx_descriptor
*tx
;
745 struct async_submit_ctl submit
;
751 if (sh
->ops
.target
< 0)
752 target
= sh
->ops
.target2
;
753 else if (sh
->ops
.target2
< 0)
754 target
= sh
->ops
.target
;
756 /* we should only have one valid target */
759 pr_debug("%s: stripe %llu block: %d\n",
760 __func__
, (unsigned long long)sh
->sector
, target
);
762 tgt
= &sh
->dev
[target
];
763 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
766 atomic_inc(&sh
->count
);
768 if (target
== qd_idx
) {
769 count
= set_syndrome_sources(blocks
, sh
);
770 blocks
[count
] = NULL
; /* regenerating p is not necessary */
771 BUG_ON(blocks
[count
+1] != dest
); /* q should already be set */
772 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
773 ops_complete_compute
, sh
,
774 to_addr_conv(sh
, percpu
));
775 tx
= async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
777 /* Compute any data- or p-drive using XOR */
779 for (i
= disks
; i
-- ; ) {
780 if (i
== target
|| i
== qd_idx
)
782 blocks
[count
++] = sh
->dev
[i
].page
;
785 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
786 NULL
, ops_complete_compute
, sh
,
787 to_addr_conv(sh
, percpu
));
788 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
, &submit
);
794 static struct dma_async_tx_descriptor
*
795 ops_run_compute6_2(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
797 int i
, count
, disks
= sh
->disks
;
798 int syndrome_disks
= sh
->ddf_layout
? disks
: disks
-2;
799 int d0_idx
= raid6_d0(sh
);
800 int faila
= -1, failb
= -1;
801 int target
= sh
->ops
.target
;
802 int target2
= sh
->ops
.target2
;
803 struct r5dev
*tgt
= &sh
->dev
[target
];
804 struct r5dev
*tgt2
= &sh
->dev
[target2
];
805 struct dma_async_tx_descriptor
*tx
;
806 struct page
**blocks
= percpu
->scribble
;
807 struct async_submit_ctl submit
;
809 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
810 __func__
, (unsigned long long)sh
->sector
, target
, target2
);
811 BUG_ON(target
< 0 || target2
< 0);
812 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
813 BUG_ON(!test_bit(R5_Wantcompute
, &tgt2
->flags
));
815 /* we need to open-code set_syndrome_sources to handle the
816 * slot number conversion for 'faila' and 'failb'
818 for (i
= 0; i
< disks
; i
++)
823 int slot
= raid6_idx_to_slot(i
, sh
, &count
, syndrome_disks
);
825 blocks
[slot
] = sh
->dev
[i
].page
;
831 i
= raid6_next_disk(i
, disks
);
832 } while (i
!= d0_idx
);
834 BUG_ON(faila
== failb
);
837 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
838 __func__
, (unsigned long long)sh
->sector
, faila
, failb
);
840 atomic_inc(&sh
->count
);
842 if (failb
== syndrome_disks
+1) {
843 /* Q disk is one of the missing disks */
844 if (faila
== syndrome_disks
) {
845 /* Missing P+Q, just recompute */
846 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
847 ops_complete_compute
, sh
,
848 to_addr_conv(sh
, percpu
));
849 return async_gen_syndrome(blocks
, 0, syndrome_disks
+2,
850 STRIPE_SIZE
, &submit
);
854 int qd_idx
= sh
->qd_idx
;
856 /* Missing D+Q: recompute D from P, then recompute Q */
857 if (target
== qd_idx
)
858 data_target
= target2
;
860 data_target
= target
;
863 for (i
= disks
; i
-- ; ) {
864 if (i
== data_target
|| i
== qd_idx
)
866 blocks
[count
++] = sh
->dev
[i
].page
;
868 dest
= sh
->dev
[data_target
].page
;
869 init_async_submit(&submit
,
870 ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
872 to_addr_conv(sh
, percpu
));
873 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
,
876 count
= set_syndrome_sources(blocks
, sh
);
877 init_async_submit(&submit
, ASYNC_TX_FENCE
, tx
,
878 ops_complete_compute
, sh
,
879 to_addr_conv(sh
, percpu
));
880 return async_gen_syndrome(blocks
, 0, count
+2,
881 STRIPE_SIZE
, &submit
);
884 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
885 ops_complete_compute
, sh
,
886 to_addr_conv(sh
, percpu
));
887 if (failb
== syndrome_disks
) {
888 /* We're missing D+P. */
889 return async_raid6_datap_recov(syndrome_disks
+2,
893 /* We're missing D+D. */
894 return async_raid6_2data_recov(syndrome_disks
+2,
895 STRIPE_SIZE
, faila
, failb
,
902 static void ops_complete_prexor(void *stripe_head_ref
)
904 struct stripe_head
*sh
= stripe_head_ref
;
906 pr_debug("%s: stripe %llu\n", __func__
,
907 (unsigned long long)sh
->sector
);
910 static struct dma_async_tx_descriptor
*
911 ops_run_prexor(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
912 struct dma_async_tx_descriptor
*tx
)
914 int disks
= sh
->disks
;
915 struct page
**xor_srcs
= percpu
->scribble
;
916 int count
= 0, pd_idx
= sh
->pd_idx
, i
;
917 struct async_submit_ctl submit
;
919 /* existing parity data subtracted */
920 struct page
*xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
922 pr_debug("%s: stripe %llu\n", __func__
,
923 (unsigned long long)sh
->sector
);
925 for (i
= disks
; i
--; ) {
926 struct r5dev
*dev
= &sh
->dev
[i
];
927 /* Only process blocks that are known to be uptodate */
928 if (test_bit(R5_Wantdrain
, &dev
->flags
))
929 xor_srcs
[count
++] = dev
->page
;
932 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_DROP_DST
, tx
,
933 ops_complete_prexor
, sh
, to_addr_conv(sh
, percpu
));
934 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
939 static struct dma_async_tx_descriptor
*
940 ops_run_biodrain(struct stripe_head
*sh
, struct dma_async_tx_descriptor
*tx
)
942 int disks
= sh
->disks
;
945 pr_debug("%s: stripe %llu\n", __func__
,
946 (unsigned long long)sh
->sector
);
948 for (i
= disks
; i
--; ) {
949 struct r5dev
*dev
= &sh
->dev
[i
];
952 if (test_and_clear_bit(R5_Wantdrain
, &dev
->flags
)) {
955 spin_lock(&sh
->lock
);
956 chosen
= dev
->towrite
;
958 BUG_ON(dev
->written
);
959 wbi
= dev
->written
= chosen
;
960 spin_unlock(&sh
->lock
);
962 while (wbi
&& wbi
->bi_sector
<
963 dev
->sector
+ STRIPE_SECTORS
) {
964 tx
= async_copy_data(1, wbi
, dev
->page
,
966 wbi
= r5_next_bio(wbi
, dev
->sector
);
974 static void ops_complete_reconstruct(void *stripe_head_ref
)
976 struct stripe_head
*sh
= stripe_head_ref
;
977 int disks
= sh
->disks
;
978 int pd_idx
= sh
->pd_idx
;
979 int qd_idx
= sh
->qd_idx
;
982 pr_debug("%s: stripe %llu\n", __func__
,
983 (unsigned long long)sh
->sector
);
985 for (i
= disks
; i
--; ) {
986 struct r5dev
*dev
= &sh
->dev
[i
];
988 if (dev
->written
|| i
== pd_idx
|| i
== qd_idx
)
989 set_bit(R5_UPTODATE
, &dev
->flags
);
992 if (sh
->reconstruct_state
== reconstruct_state_drain_run
)
993 sh
->reconstruct_state
= reconstruct_state_drain_result
;
994 else if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
)
995 sh
->reconstruct_state
= reconstruct_state_prexor_drain_result
;
997 BUG_ON(sh
->reconstruct_state
!= reconstruct_state_run
);
998 sh
->reconstruct_state
= reconstruct_state_result
;
1001 set_bit(STRIPE_HANDLE
, &sh
->state
);
1006 ops_run_reconstruct5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1007 struct dma_async_tx_descriptor
*tx
)
1009 int disks
= sh
->disks
;
1010 struct page
**xor_srcs
= percpu
->scribble
;
1011 struct async_submit_ctl submit
;
1012 int count
= 0, pd_idx
= sh
->pd_idx
, i
;
1013 struct page
*xor_dest
;
1015 unsigned long flags
;
1017 pr_debug("%s: stripe %llu\n", __func__
,
1018 (unsigned long long)sh
->sector
);
1020 /* check if prexor is active which means only process blocks
1021 * that are part of a read-modify-write (written)
1023 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
) {
1025 xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
1026 for (i
= disks
; i
--; ) {
1027 struct r5dev
*dev
= &sh
->dev
[i
];
1029 xor_srcs
[count
++] = dev
->page
;
1032 xor_dest
= sh
->dev
[pd_idx
].page
;
1033 for (i
= disks
; i
--; ) {
1034 struct r5dev
*dev
= &sh
->dev
[i
];
1036 xor_srcs
[count
++] = dev
->page
;
1040 /* 1/ if we prexor'd then the dest is reused as a source
1041 * 2/ if we did not prexor then we are redoing the parity
1042 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1043 * for the synchronous xor case
1045 flags
= ASYNC_TX_ACK
|
1046 (prexor
? ASYNC_TX_XOR_DROP_DST
: ASYNC_TX_XOR_ZERO_DST
);
1048 atomic_inc(&sh
->count
);
1050 init_async_submit(&submit
, flags
, tx
, ops_complete_reconstruct
, sh
,
1051 to_addr_conv(sh
, percpu
));
1052 if (unlikely(count
== 1))
1053 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
1055 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
1059 ops_run_reconstruct6(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1060 struct dma_async_tx_descriptor
*tx
)
1062 struct async_submit_ctl submit
;
1063 struct page
**blocks
= percpu
->scribble
;
1066 pr_debug("%s: stripe %llu\n", __func__
, (unsigned long long)sh
->sector
);
1068 count
= set_syndrome_sources(blocks
, sh
);
1070 atomic_inc(&sh
->count
);
1072 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_reconstruct
,
1073 sh
, to_addr_conv(sh
, percpu
));
1074 async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
1077 static void ops_complete_check(void *stripe_head_ref
)
1079 struct stripe_head
*sh
= stripe_head_ref
;
1081 pr_debug("%s: stripe %llu\n", __func__
,
1082 (unsigned long long)sh
->sector
);
1084 sh
->check_state
= check_state_check_result
;
1085 set_bit(STRIPE_HANDLE
, &sh
->state
);
1089 static void ops_run_check_p(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1091 int disks
= sh
->disks
;
1092 int pd_idx
= sh
->pd_idx
;
1093 int qd_idx
= sh
->qd_idx
;
1094 struct page
*xor_dest
;
1095 struct page
**xor_srcs
= percpu
->scribble
;
1096 struct dma_async_tx_descriptor
*tx
;
1097 struct async_submit_ctl submit
;
1101 pr_debug("%s: stripe %llu\n", __func__
,
1102 (unsigned long long)sh
->sector
);
1105 xor_dest
= sh
->dev
[pd_idx
].page
;
1106 xor_srcs
[count
++] = xor_dest
;
1107 for (i
= disks
; i
--; ) {
1108 if (i
== pd_idx
|| i
== qd_idx
)
1110 xor_srcs
[count
++] = sh
->dev
[i
].page
;
1113 init_async_submit(&submit
, 0, NULL
, NULL
, NULL
,
1114 to_addr_conv(sh
, percpu
));
1115 tx
= async_xor_val(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
,
1116 &sh
->ops
.zero_sum_result
, &submit
);
1118 atomic_inc(&sh
->count
);
1119 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_check
, sh
, NULL
);
1120 tx
= async_trigger_callback(&submit
);
1123 static void ops_run_check_pq(struct stripe_head
*sh
, struct raid5_percpu
*percpu
, int checkp
)
1125 struct page
**srcs
= percpu
->scribble
;
1126 struct async_submit_ctl submit
;
1129 pr_debug("%s: stripe %llu checkp: %d\n", __func__
,
1130 (unsigned long long)sh
->sector
, checkp
);
1132 count
= set_syndrome_sources(srcs
, sh
);
1136 atomic_inc(&sh
->count
);
1137 init_async_submit(&submit
, ASYNC_TX_ACK
, NULL
, ops_complete_check
,
1138 sh
, to_addr_conv(sh
, percpu
));
1139 async_syndrome_val(srcs
, 0, count
+2, STRIPE_SIZE
,
1140 &sh
->ops
.zero_sum_result
, percpu
->spare_page
, &submit
);
1143 static void __raid_run_ops(struct stripe_head
*sh
, unsigned long ops_request
)
1145 int overlap_clear
= 0, i
, disks
= sh
->disks
;
1146 struct dma_async_tx_descriptor
*tx
= NULL
;
1147 raid5_conf_t
*conf
= sh
->raid_conf
;
1148 int level
= conf
->level
;
1149 struct raid5_percpu
*percpu
;
1153 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
1154 if (test_bit(STRIPE_OP_BIOFILL
, &ops_request
)) {
1155 ops_run_biofill(sh
);
1159 if (test_bit(STRIPE_OP_COMPUTE_BLK
, &ops_request
)) {
1161 tx
= ops_run_compute5(sh
, percpu
);
1163 if (sh
->ops
.target2
< 0 || sh
->ops
.target
< 0)
1164 tx
= ops_run_compute6_1(sh
, percpu
);
1166 tx
= ops_run_compute6_2(sh
, percpu
);
1168 /* terminate the chain if reconstruct is not set to be run */
1169 if (tx
&& !test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
))
1173 if (test_bit(STRIPE_OP_PREXOR
, &ops_request
))
1174 tx
= ops_run_prexor(sh
, percpu
, tx
);
1176 if (test_bit(STRIPE_OP_BIODRAIN
, &ops_request
)) {
1177 tx
= ops_run_biodrain(sh
, tx
);
1181 if (test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
)) {
1183 ops_run_reconstruct5(sh
, percpu
, tx
);
1185 ops_run_reconstruct6(sh
, percpu
, tx
);
1188 if (test_bit(STRIPE_OP_CHECK
, &ops_request
)) {
1189 if (sh
->check_state
== check_state_run
)
1190 ops_run_check_p(sh
, percpu
);
1191 else if (sh
->check_state
== check_state_run_q
)
1192 ops_run_check_pq(sh
, percpu
, 0);
1193 else if (sh
->check_state
== check_state_run_pq
)
1194 ops_run_check_pq(sh
, percpu
, 1);
1200 for (i
= disks
; i
--; ) {
1201 struct r5dev
*dev
= &sh
->dev
[i
];
1202 if (test_and_clear_bit(R5_Overlap
, &dev
->flags
))
1203 wake_up(&sh
->raid_conf
->wait_for_overlap
);
1208 #ifdef CONFIG_MULTICORE_RAID456
1209 static void async_run_ops(void *param
, async_cookie_t cookie
)
1211 struct stripe_head
*sh
= param
;
1212 unsigned long ops_request
= sh
->ops
.request
;
1214 clear_bit_unlock(STRIPE_OPS_REQ_PENDING
, &sh
->state
);
1215 wake_up(&sh
->ops
.wait_for_ops
);
1217 __raid_run_ops(sh
, ops_request
);
1221 static void raid_run_ops(struct stripe_head
*sh
, unsigned long ops_request
)
1223 /* since handle_stripe can be called outside of raid5d context
1224 * we need to ensure sh->ops.request is de-staged before another
1227 wait_event(sh
->ops
.wait_for_ops
,
1228 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING
, &sh
->state
));
1229 sh
->ops
.request
= ops_request
;
1231 atomic_inc(&sh
->count
);
1232 async_schedule(async_run_ops
, sh
);
1235 #define raid_run_ops __raid_run_ops
1238 static int grow_one_stripe(raid5_conf_t
*conf
)
1240 struct stripe_head
*sh
;
1241 int disks
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
1242 sh
= kmem_cache_alloc(conf
->slab_cache
, GFP_KERNEL
);
1245 memset(sh
, 0, sizeof(*sh
) + (disks
-1)*sizeof(struct r5dev
));
1246 sh
->raid_conf
= conf
;
1247 spin_lock_init(&sh
->lock
);
1248 #ifdef CONFIG_MULTICORE_RAID456
1249 init_waitqueue_head(&sh
->ops
.wait_for_ops
);
1252 if (grow_buffers(sh
, disks
)) {
1253 shrink_buffers(sh
, disks
);
1254 kmem_cache_free(conf
->slab_cache
, sh
);
1257 /* we just created an active stripe so... */
1258 atomic_set(&sh
->count
, 1);
1259 atomic_inc(&conf
->active_stripes
);
1260 INIT_LIST_HEAD(&sh
->lru
);
1265 static int grow_stripes(raid5_conf_t
*conf
, int num
)
1267 struct kmem_cache
*sc
;
1268 int devs
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
1270 sprintf(conf
->cache_name
[0],
1271 "raid%d-%s", conf
->level
, mdname(conf
->mddev
));
1272 sprintf(conf
->cache_name
[1],
1273 "raid%d-%s-alt", conf
->level
, mdname(conf
->mddev
));
1274 conf
->active_name
= 0;
1275 sc
= kmem_cache_create(conf
->cache_name
[conf
->active_name
],
1276 sizeof(struct stripe_head
)+(devs
-1)*sizeof(struct r5dev
),
1280 conf
->slab_cache
= sc
;
1281 conf
->pool_size
= devs
;
1283 if (!grow_one_stripe(conf
))
/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
1301 static size_t scribble_len(int num
)
1305 len
= sizeof(struct page
*) * (num
+2) + sizeof(addr_conv_t
) * (num
+2);
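/*
 * A minimal illustrative sketch (not part of the driver): for an 8-device
 * array the scribble region holds 10 page pointers (the devices plus the P and
 * Q destinations) followed by 10 address-conversion entries, which is exactly
 * what the formula above computes.  The helper name is hypothetical.
 */
static inline size_t example_scribble_len_8_disks(void)
{
	return sizeof(struct page *) * (8 + 2) + sizeof(addr_conv_t) * (8 + 2);
}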
1310 static int resize_stripes(raid5_conf_t
*conf
, int newsize
)
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
1335 struct stripe_head
*osh
, *nsh
;
1336 LIST_HEAD(newstripes
);
1337 struct disk_info
*ndisks
;
1340 struct kmem_cache
*sc
;
1343 if (newsize
<= conf
->pool_size
)
1344 return 0; /* never bother to shrink */
1346 err
= md_allow_write(conf
->mddev
);
1351 sc
= kmem_cache_create(conf
->cache_name
[1-conf
->active_name
],
1352 sizeof(struct stripe_head
)+(newsize
-1)*sizeof(struct r5dev
),
1357 for (i
= conf
->max_nr_stripes
; i
; i
--) {
1358 nsh
= kmem_cache_alloc(sc
, GFP_KERNEL
);
1362 memset(nsh
, 0, sizeof(*nsh
) + (newsize
-1)*sizeof(struct r5dev
));
1364 nsh
->raid_conf
= conf
;
1365 spin_lock_init(&nsh
->lock
);
1366 #ifdef CONFIG_MULTICORE_RAID456
1367 init_waitqueue_head(&nsh
->ops
.wait_for_ops
);
1370 list_add(&nsh
->lru
, &newstripes
);
1373 /* didn't get enough, give up */
1374 while (!list_empty(&newstripes
)) {
1375 nsh
= list_entry(newstripes
.next
, struct stripe_head
, lru
);
1376 list_del(&nsh
->lru
);
1377 kmem_cache_free(sc
, nsh
);
1379 kmem_cache_destroy(sc
);
1382 /* Step 2 - Must use GFP_NOIO now.
1383 * OK, we have enough stripes, start collecting inactive
1384 * stripes and copying them over
1386 list_for_each_entry(nsh
, &newstripes
, lru
) {
1387 spin_lock_irq(&conf
->device_lock
);
1388 wait_event_lock_irq(conf
->wait_for_stripe
,
1389 !list_empty(&conf
->inactive_list
),
1391 unplug_slaves(conf
->mddev
)
1393 osh
= get_free_stripe(conf
);
1394 spin_unlock_irq(&conf
->device_lock
);
1395 atomic_set(&nsh
->count
, 1);
1396 for(i
=0; i
<conf
->pool_size
; i
++)
1397 nsh
->dev
[i
].page
= osh
->dev
[i
].page
;
1398 for( ; i
<newsize
; i
++)
1399 nsh
->dev
[i
].page
= NULL
;
1400 kmem_cache_free(conf
->slab_cache
, osh
);
1402 kmem_cache_destroy(conf
->slab_cache
);
1405 * At this point, we are holding all the stripes so the array
1406 * is completely stalled, so now is a good time to resize
1407 * conf->disks and the scribble region
1409 ndisks
= kzalloc(newsize
* sizeof(struct disk_info
), GFP_NOIO
);
1411 for (i
=0; i
<conf
->raid_disks
; i
++)
1412 ndisks
[i
] = conf
->disks
[i
];
1414 conf
->disks
= ndisks
;
1419 conf
->scribble_len
= scribble_len(newsize
);
1420 for_each_present_cpu(cpu
) {
1421 struct raid5_percpu
*percpu
;
1424 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
1425 scribble
= kmalloc(conf
->scribble_len
, GFP_NOIO
);
1428 kfree(percpu
->scribble
);
1429 percpu
->scribble
= scribble
;
1437 /* Step 4, return new stripes to service */
1438 while(!list_empty(&newstripes
)) {
1439 nsh
= list_entry(newstripes
.next
, struct stripe_head
, lru
);
1440 list_del_init(&nsh
->lru
);
1442 for (i
=conf
->raid_disks
; i
< newsize
; i
++)
1443 if (nsh
->dev
[i
].page
== NULL
) {
1444 struct page
*p
= alloc_page(GFP_NOIO
);
1445 nsh
->dev
[i
].page
= p
;
1449 release_stripe(nsh
);
	/* critical section passed, GFP_NOIO no longer needed */
1453 conf
->slab_cache
= sc
;
1454 conf
->active_name
= 1-conf
->active_name
;
1455 conf
->pool_size
= newsize
;
1459 static int drop_one_stripe(raid5_conf_t
*conf
)
1461 struct stripe_head
*sh
;
1463 spin_lock_irq(&conf
->device_lock
);
1464 sh
= get_free_stripe(conf
);
1465 spin_unlock_irq(&conf
->device_lock
);
1468 BUG_ON(atomic_read(&sh
->count
));
1469 shrink_buffers(sh
, conf
->pool_size
);
1470 kmem_cache_free(conf
->slab_cache
, sh
);
1471 atomic_dec(&conf
->active_stripes
);
1475 static void shrink_stripes(raid5_conf_t
*conf
)
1477 while (drop_one_stripe(conf
))
1480 if (conf
->slab_cache
)
1481 kmem_cache_destroy(conf
->slab_cache
);
1482 conf
->slab_cache
= NULL
;
1485 static void raid5_end_read_request(struct bio
* bi
, int error
)
1487 struct stripe_head
*sh
= bi
->bi_private
;
1488 raid5_conf_t
*conf
= sh
->raid_conf
;
1489 int disks
= sh
->disks
, i
;
1490 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
1491 char b
[BDEVNAME_SIZE
];
1495 for (i
=0 ; i
<disks
; i
++)
1496 if (bi
== &sh
->dev
[i
].req
)
1499 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1500 (unsigned long long)sh
->sector
, i
, atomic_read(&sh
->count
),
1508 set_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
1509 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
)) {
1510 rdev
= conf
->disks
[i
].rdev
;
1511 printk_rl(KERN_INFO
"raid5:%s: read error corrected"
1512 " (%lu sectors at %llu on %s)\n",
1513 mdname(conf
->mddev
), STRIPE_SECTORS
,
1514 (unsigned long long)(sh
->sector
1515 + rdev
->data_offset
),
1516 bdevname(rdev
->bdev
, b
));
1517 clear_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1518 clear_bit(R5_ReWrite
, &sh
->dev
[i
].flags
);
1520 if (atomic_read(&conf
->disks
[i
].rdev
->read_errors
))
1521 atomic_set(&conf
->disks
[i
].rdev
->read_errors
, 0);
1523 const char *bdn
= bdevname(conf
->disks
[i
].rdev
->bdev
, b
);
1525 rdev
= conf
->disks
[i
].rdev
;
1527 clear_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
1528 atomic_inc(&rdev
->read_errors
);
1529 if (conf
->mddev
->degraded
>= conf
->max_degraded
)
1530 printk_rl(KERN_WARNING
1531 "raid5:%s: read error not correctable "
1532 "(sector %llu on %s).\n",
1533 mdname(conf
->mddev
),
1534 (unsigned long long)(sh
->sector
1535 + rdev
->data_offset
),
1537 else if (test_bit(R5_ReWrite
, &sh
->dev
[i
].flags
))
1539 printk_rl(KERN_WARNING
1540 "raid5:%s: read error NOT corrected!! "
1541 "(sector %llu on %s).\n",
1542 mdname(conf
->mddev
),
1543 (unsigned long long)(sh
->sector
1544 + rdev
->data_offset
),
1546 else if (atomic_read(&rdev
->read_errors
)
1547 > conf
->max_nr_stripes
)
1549 "raid5:%s: Too many read errors, failing device %s.\n",
1550 mdname(conf
->mddev
), bdn
);
1554 set_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1556 clear_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1557 clear_bit(R5_ReWrite
, &sh
->dev
[i
].flags
);
1558 md_error(conf
->mddev
, rdev
);
1561 rdev_dec_pending(conf
->disks
[i
].rdev
, conf
->mddev
);
1562 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
1563 set_bit(STRIPE_HANDLE
, &sh
->state
);
1567 static void raid5_end_write_request(struct bio
*bi
, int error
)
1569 struct stripe_head
*sh
= bi
->bi_private
;
1570 raid5_conf_t
*conf
= sh
->raid_conf
;
1571 int disks
= sh
->disks
, i
;
1572 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
1574 for (i
=0 ; i
<disks
; i
++)
1575 if (bi
== &sh
->dev
[i
].req
)
1578 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1579 (unsigned long long)sh
->sector
, i
, atomic_read(&sh
->count
),
1587 md_error(conf
->mddev
, conf
->disks
[i
].rdev
);
1589 rdev_dec_pending(conf
->disks
[i
].rdev
, conf
->mddev
);
1591 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
1592 set_bit(STRIPE_HANDLE
, &sh
->state
);
1597 static sector_t
compute_blocknr(struct stripe_head
*sh
, int i
, int previous
);
1599 static void raid5_build_block(struct stripe_head
*sh
, int i
, int previous
)
1601 struct r5dev
*dev
= &sh
->dev
[i
];
1603 bio_init(&dev
->req
);
1604 dev
->req
.bi_io_vec
= &dev
->vec
;
1606 dev
->req
.bi_max_vecs
++;
1607 dev
->vec
.bv_page
= dev
->page
;
1608 dev
->vec
.bv_len
= STRIPE_SIZE
;
1609 dev
->vec
.bv_offset
= 0;
1611 dev
->req
.bi_sector
= sh
->sector
;
1612 dev
->req
.bi_private
= sh
;
1615 dev
->sector
= compute_blocknr(sh
, i
, previous
);
1618 static void error(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
1620 char b
[BDEVNAME_SIZE
];
1621 raid5_conf_t
*conf
= (raid5_conf_t
*) mddev
->private;
1622 pr_debug("raid5: error called\n");
1624 if (!test_bit(Faulty
, &rdev
->flags
)) {
1625 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
1626 if (test_and_clear_bit(In_sync
, &rdev
->flags
)) {
1627 unsigned long flags
;
1628 spin_lock_irqsave(&conf
->device_lock
, flags
);
1630 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1632 * if recovery was running, make sure it aborts.
1634 set_bit(MD_RECOVERY_INTR
, &mddev
->recovery
);
1636 set_bit(Faulty
, &rdev
->flags
);
1638 "raid5: Disk failure on %s, disabling device.\n"
1639 "raid5: Operation continuing on %d devices.\n",
1640 bdevname(rdev
->bdev
,b
), conf
->raid_disks
- mddev
->degraded
);
1645 * Input: a 'big' sector number,
1646 * Output: index of the data and parity disk, and the sector # in them.
1648 static sector_t
raid5_compute_sector(raid5_conf_t
*conf
, sector_t r_sector
,
1649 int previous
, int *dd_idx
,
1650 struct stripe_head
*sh
)
1652 sector_t stripe
, stripe2
;
1653 sector_t chunk_number
;
1654 unsigned int chunk_offset
;
1657 sector_t new_sector
;
1658 int algorithm
= previous
? conf
->prev_algo
1660 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
1661 : conf
->chunk_sectors
;
1662 int raid_disks
= previous
? conf
->previous_raid_disks
1664 int data_disks
= raid_disks
- conf
->max_degraded
;
1666 /* First compute the information on this sector */
1669 * Compute the chunk number and the sector offset inside the chunk
1671 chunk_offset
= sector_div(r_sector
, sectors_per_chunk
);
1672 chunk_number
= r_sector
;
1675 * Compute the stripe number
1677 stripe
= chunk_number
;
1678 *dd_idx
= sector_div(stripe
, data_disks
);
1681 * Select the parity disk based on the user selected algorithm.
1683 pd_idx
= qd_idx
= ~0;
1684 switch(conf
->level
) {
1686 pd_idx
= data_disks
;
1689 switch (algorithm
) {
1690 case ALGORITHM_LEFT_ASYMMETRIC
:
1691 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
);
1692 if (*dd_idx
>= pd_idx
)
1695 case ALGORITHM_RIGHT_ASYMMETRIC
:
1696 pd_idx
= sector_div(stripe2
, raid_disks
);
1697 if (*dd_idx
>= pd_idx
)
1700 case ALGORITHM_LEFT_SYMMETRIC
:
1701 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
);
1702 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1704 case ALGORITHM_RIGHT_SYMMETRIC
:
1705 pd_idx
= sector_div(stripe2
, raid_disks
);
1706 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1708 case ALGORITHM_PARITY_0
:
1712 case ALGORITHM_PARITY_N
:
1713 pd_idx
= data_disks
;
1716 printk(KERN_ERR
"raid5: unsupported algorithm %d\n",
1723 switch (algorithm
) {
1724 case ALGORITHM_LEFT_ASYMMETRIC
:
1725 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1726 qd_idx
= pd_idx
+ 1;
1727 if (pd_idx
== raid_disks
-1) {
1728 (*dd_idx
)++; /* Q D D D P */
1730 } else if (*dd_idx
>= pd_idx
)
1731 (*dd_idx
) += 2; /* D D P Q D */
1733 case ALGORITHM_RIGHT_ASYMMETRIC
:
1734 pd_idx
= sector_div(stripe2
, raid_disks
);
1735 qd_idx
= pd_idx
+ 1;
1736 if (pd_idx
== raid_disks
-1) {
1737 (*dd_idx
)++; /* Q D D D P */
1739 } else if (*dd_idx
>= pd_idx
)
1740 (*dd_idx
) += 2; /* D D P Q D */
1742 case ALGORITHM_LEFT_SYMMETRIC
:
1743 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1744 qd_idx
= (pd_idx
+ 1) % raid_disks
;
1745 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
1747 case ALGORITHM_RIGHT_SYMMETRIC
:
1748 pd_idx
= sector_div(stripe2
, raid_disks
);
1749 qd_idx
= (pd_idx
+ 1) % raid_disks
;
1750 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
1753 case ALGORITHM_PARITY_0
:
1758 case ALGORITHM_PARITY_N
:
1759 pd_idx
= data_disks
;
1760 qd_idx
= data_disks
+ 1;
1763 case ALGORITHM_ROTATING_ZERO_RESTART
:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the order
			 * of blocks used for computing Q is different.
			 */
1767 pd_idx
= sector_div(stripe2
, raid_disks
);
1768 qd_idx
= pd_idx
+ 1;
1769 if (pd_idx
== raid_disks
-1) {
1770 (*dd_idx
)++; /* Q D D D P */
1772 } else if (*dd_idx
>= pd_idx
)
1773 (*dd_idx
) += 2; /* D D P Q D */
1777 case ALGORITHM_ROTATING_N_RESTART
:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
1783 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1784 qd_idx
= pd_idx
+ 1;
1785 if (pd_idx
== raid_disks
-1) {
1786 (*dd_idx
)++; /* Q D D D P */
1788 } else if (*dd_idx
>= pd_idx
)
1789 (*dd_idx
) += 2; /* D D P Q D */
1793 case ALGORITHM_ROTATING_N_CONTINUE
:
1794 /* Same as left_symmetric but Q is before P */
1795 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
1796 qd_idx
= (pd_idx
+ raid_disks
- 1) % raid_disks
;
1797 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1801 case ALGORITHM_LEFT_ASYMMETRIC_6
:
1802 /* RAID5 left_asymmetric, with Q on last device */
1803 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
-1);
1804 if (*dd_idx
>= pd_idx
)
1806 qd_idx
= raid_disks
- 1;
1809 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
1810 pd_idx
= sector_div(stripe2
, raid_disks
-1);
1811 if (*dd_idx
>= pd_idx
)
1813 qd_idx
= raid_disks
- 1;
1816 case ALGORITHM_LEFT_SYMMETRIC_6
:
1817 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
-1);
1818 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
1819 qd_idx
= raid_disks
- 1;
1822 case ALGORITHM_RIGHT_SYMMETRIC_6
:
1823 pd_idx
= sector_div(stripe2
, raid_disks
-1);
1824 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
1825 qd_idx
= raid_disks
- 1;
1828 case ALGORITHM_PARITY_0_6
:
1831 qd_idx
= raid_disks
- 1;
1836 printk(KERN_CRIT
"raid6: unsupported algorithm %d\n",
1844 sh
->pd_idx
= pd_idx
;
1845 sh
->qd_idx
= qd_idx
;
1846 sh
->ddf_layout
= ddf_layout
;
1849 * Finally, compute the new sector number
1851 new_sector
= (sector_t
)stripe
* sectors_per_chunk
+ chunk_offset
;
1856 static sector_t
compute_blocknr(struct stripe_head
*sh
, int i
, int previous
)
1858 raid5_conf_t
*conf
= sh
->raid_conf
;
1859 int raid_disks
= sh
->disks
;
1860 int data_disks
= raid_disks
- conf
->max_degraded
;
1861 sector_t new_sector
= sh
->sector
, check
;
1862 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
1863 : conf
->chunk_sectors
;
1864 int algorithm
= previous
? conf
->prev_algo
1868 sector_t chunk_number
;
1869 int dummy1
, dd_idx
= i
;
1871 struct stripe_head sh2
;
1874 chunk_offset
= sector_div(new_sector
, sectors_per_chunk
);
1875 stripe
= new_sector
;
1877 if (i
== sh
->pd_idx
)
1879 switch(conf
->level
) {
1882 switch (algorithm
) {
1883 case ALGORITHM_LEFT_ASYMMETRIC
:
1884 case ALGORITHM_RIGHT_ASYMMETRIC
:
1888 case ALGORITHM_LEFT_SYMMETRIC
:
1889 case ALGORITHM_RIGHT_SYMMETRIC
:
1892 i
-= (sh
->pd_idx
+ 1);
1894 case ALGORITHM_PARITY_0
:
1897 case ALGORITHM_PARITY_N
:
1900 printk(KERN_ERR
"raid5: unsupported algorithm %d\n",
1906 if (i
== sh
->qd_idx
)
1907 return 0; /* It is the Q disk */
1908 switch (algorithm
) {
1909 case ALGORITHM_LEFT_ASYMMETRIC
:
1910 case ALGORITHM_RIGHT_ASYMMETRIC
:
1911 case ALGORITHM_ROTATING_ZERO_RESTART
:
1912 case ALGORITHM_ROTATING_N_RESTART
:
1913 if (sh
->pd_idx
== raid_disks
-1)
1914 i
--; /* Q D D D P */
1915 else if (i
> sh
->pd_idx
)
1916 i
-= 2; /* D D P Q D */
1918 case ALGORITHM_LEFT_SYMMETRIC
:
1919 case ALGORITHM_RIGHT_SYMMETRIC
:
1920 if (sh
->pd_idx
== raid_disks
-1)
1921 i
--; /* Q D D D P */
1926 i
-= (sh
->pd_idx
+ 2);
1929 case ALGORITHM_PARITY_0
:
1932 case ALGORITHM_PARITY_N
:
1934 case ALGORITHM_ROTATING_N_CONTINUE
:
1935 /* Like left_symmetric, but P is before Q */
1936 if (sh
->pd_idx
== 0)
1937 i
--; /* P D D D Q */
1942 i
-= (sh
->pd_idx
+ 1);
1945 case ALGORITHM_LEFT_ASYMMETRIC_6
:
1946 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
1950 case ALGORITHM_LEFT_SYMMETRIC_6
:
1951 case ALGORITHM_RIGHT_SYMMETRIC_6
:
1953 i
+= data_disks
+ 1;
1954 i
-= (sh
->pd_idx
+ 1);
1956 case ALGORITHM_PARITY_0_6
:
1960 printk(KERN_CRIT
"raid6: unsupported algorithm %d\n",
1967 chunk_number
= stripe
* data_disks
+ i
;
1968 r_sector
= chunk_number
* sectors_per_chunk
+ chunk_offset
;
1970 check
= raid5_compute_sector(conf
, r_sector
,
1971 previous
, &dummy1
, &sh2
);
1972 if (check
!= sh
->sector
|| dummy1
!= dd_idx
|| sh2
.pd_idx
!= sh
->pd_idx
1973 || sh2
.qd_idx
!= sh
->qd_idx
) {
1974 printk(KERN_ERR
"compute_blocknr: map not correct\n");
1982 schedule_reconstruction(struct stripe_head
*sh
, struct stripe_head_state
*s
,
1983 int rcw
, int expand
)
1985 int i
, pd_idx
= sh
->pd_idx
, disks
= sh
->disks
;
1986 raid5_conf_t
*conf
= sh
->raid_conf
;
1987 int level
= conf
->level
;
1990 /* if we are not expanding this is a proper write request, and
1991 * there will be bios with new data to be drained into the
1995 sh
->reconstruct_state
= reconstruct_state_drain_run
;
1996 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
1998 sh
->reconstruct_state
= reconstruct_state_run
;
2000 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2002 for (i
= disks
; i
--; ) {
2003 struct r5dev
*dev
= &sh
->dev
[i
];
2006 set_bit(R5_LOCKED
, &dev
->flags
);
2007 set_bit(R5_Wantdrain
, &dev
->flags
);
2009 clear_bit(R5_UPTODATE
, &dev
->flags
);
2013 if (s
->locked
+ conf
->max_degraded
== disks
)
2014 if (!test_and_set_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2015 atomic_inc(&conf
->pending_full_writes
);
2018 BUG_ON(!(test_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
) ||
2019 test_bit(R5_Wantcompute
, &sh
->dev
[pd_idx
].flags
)));
2021 sh
->reconstruct_state
= reconstruct_state_prexor_drain_run
;
2022 set_bit(STRIPE_OP_PREXOR
, &s
->ops_request
);
2023 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2024 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2026 for (i
= disks
; i
--; ) {
2027 struct r5dev
*dev
= &sh
->dev
[i
];
2032 (test_bit(R5_UPTODATE
, &dev
->flags
) ||
2033 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2034 set_bit(R5_Wantdrain
, &dev
->flags
);
2035 set_bit(R5_LOCKED
, &dev
->flags
);
2036 clear_bit(R5_UPTODATE
, &dev
->flags
);
2042 /* keep the parity disk(s) locked while asynchronous operations
2045 set_bit(R5_LOCKED
, &sh
->dev
[pd_idx
].flags
);
2046 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2050 int qd_idx
= sh
->qd_idx
;
2051 struct r5dev
*dev
= &sh
->dev
[qd_idx
];
2053 set_bit(R5_LOCKED
, &dev
->flags
);
2054 clear_bit(R5_UPTODATE
, &dev
->flags
);
2058 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2059 __func__
, (unsigned long long)sh
->sector
,
2060 s
->locked
, s
->ops_request
);
/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
2068 static int add_stripe_bio(struct stripe_head
*sh
, struct bio
*bi
, int dd_idx
, int forwrite
)
2071 raid5_conf_t
*conf
= sh
->raid_conf
;
2074 pr_debug("adding bh b#%llu to stripe s#%llu\n",
2075 (unsigned long long)bi
->bi_sector
,
2076 (unsigned long long)sh
->sector
);
2079 spin_lock(&sh
->lock
);
2080 spin_lock_irq(&conf
->device_lock
);
2082 bip
= &sh
->dev
[dd_idx
].towrite
;
2083 if (*bip
== NULL
&& sh
->dev
[dd_idx
].written
== NULL
)
2086 bip
= &sh
->dev
[dd_idx
].toread
;
2087 while (*bip
&& (*bip
)->bi_sector
< bi
->bi_sector
) {
2088 if ((*bip
)->bi_sector
+ ((*bip
)->bi_size
>> 9) > bi
->bi_sector
)
2090 bip
= & (*bip
)->bi_next
;
2092 if (*bip
&& (*bip
)->bi_sector
< bi
->bi_sector
+ ((bi
->bi_size
)>>9))
2095 BUG_ON(*bip
&& bi
->bi_next
&& (*bip
) != bi
->bi_next
);
2099 bi
->bi_phys_segments
++;
2100 spin_unlock_irq(&conf
->device_lock
);
2101 spin_unlock(&sh
->lock
);
2103 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2104 (unsigned long long)bi
->bi_sector
,
2105 (unsigned long long)sh
->sector
, dd_idx
);
2107 if (conf
->mddev
->bitmap
&& firstwrite
) {
2108 bitmap_startwrite(conf
->mddev
->bitmap
, sh
->sector
,
2110 sh
->bm_seq
= conf
->seq_flush
+1;
2111 set_bit(STRIPE_BIT_DELAY
, &sh
->state
);
2115 /* check if page is covered */
2116 sector_t sector
= sh
->dev
[dd_idx
].sector
;
2117 for (bi
=sh
->dev
[dd_idx
].towrite
;
2118 sector
< sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
&&
2119 bi
&& bi
->bi_sector
<= sector
;
2120 bi
= r5_next_bio(bi
, sh
->dev
[dd_idx
].sector
)) {
2121 if (bi
->bi_sector
+ (bi
->bi_size
>>9) >= sector
)
2122 sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
2124 if (sector
>= sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
)
2125 set_bit(R5_OVERWRITE
, &sh
->dev
[dd_idx
].flags
);
2130 set_bit(R5_Overlap
, &sh
->dev
[dd_idx
].flags
);
2131 spin_unlock_irq(&conf
->device_lock
);
2132 spin_unlock(&sh
->lock
);
2136 static void end_reshape(raid5_conf_t
*conf
);
2138 static void stripe_set_idx(sector_t stripe
, raid5_conf_t
*conf
, int previous
,
2139 struct stripe_head
*sh
)
2141 int sectors_per_chunk
=
2142 previous
? conf
->prev_chunk_sectors
: conf
->chunk_sectors
;
2144 int chunk_offset
= sector_div(stripe
, sectors_per_chunk
);
2145 int disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
2147 raid5_compute_sector(conf
,
2148 stripe
* (disks
- conf
->max_degraded
)
2149 *sectors_per_chunk
+ chunk_offset
,
static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
 */
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed &&
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1;
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				s->syncing);
		}
	}

	return 0;
}
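/*
 * Note on the compute path above: s->uptodate == disks - 1 means every
 * block in this stripe except the requested one is already valid in the
 * stripe cache, so the missing block can be regenerated by XOR instead of
 * being read.  For example (hypothetical 4-disk RAID-5 with one failed
 * member), once the three surviving blocks are up to date the fourth is
 * scheduled as a STRIPE_OP_COMPUTE_BLK operation rather than a read.
 */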
/**
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* fetch_block6 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill6 to continue
 */
static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
			 struct r6_state *r6s, int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
				  &sh->dev[r6s->failed_num[1]] };

	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed >= 1 &&
	      (fdev[0]->toread || s->to_write)) ||
	     (s->failed >= 2 &&
	      (fdev[1]->toread || s->to_write)))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == r6s->failed_num[0] ||
				   disk_idx == r6s->failed_num[1]))) {
			/* have disk failed, and we're requested to fetch it;
			 * do compute it
			 */
			pr_debug("Computing stripe %llu block %d\n",
			       (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
				      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
			       (unsigned long long)sh->sector,
			       disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				disk_idx, s->syncing);
		}
	}

	return 0;
}
/**
 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
			int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block6(sh, s, r6s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
							sh->sector,
							STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
							0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s, int disks)
{
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		    test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			    test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			    test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
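/*
 * Worked example of the rmw/rcw choice above (hypothetical 5-disk RAID-5,
 * i.e. 4 data blocks plus parity per stripe): if only one data block is
 * being rewritten, read-modify-write needs the old data block and the old
 * parity (rmw = 2) while reconstruct-write needs the three untouched data
 * blocks (rcw = 3), so rmw wins; if three of the four data blocks are
 * fully overwritten, rmw would need 4 reads but rcw only 1, so rcw wins.
 * Blocks that cannot be read (not In_sync) are charged 2*disks so the
 * other strategy is always preferred for them.
 */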
static void handle_stripe_dirtying6(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
{
	int rcw = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = sh->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* check if we haven't enough data */
		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
		    i != pd_idx && i != qd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			rcw++;
			if (!test_bit(R5_Insync, &dev->flags))
				continue; /* it's a failed drive */

			if (
			  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				pr_debug("Read_old stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				s->locked++;
			} else {
				pr_debug("Request delayed stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(STRIPE_DELAYED, &sh->state);
				set_bit(STRIPE_HANDLE, &sh->state);
			}
		}
	}
	/* now if nothing is locked, and if we have enough data, we can start a
	 * write request
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		schedule_reconstruction(sh, s, 1, 0);
	}
}
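/*
 * Unlike handle_stripe_dirtying5(), this RAID-6 path never attempts a
 * read-modify-write: P and Q are always regenerated from all data blocks
 * (reconstruct-write), so the only question above is which missing,
 * in-sync blocks still have to be read before schedule_reconstruction()
 * can be started.
 */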
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
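/*
 * Note on the check_result handling above: when a scrub finds a parity
 * mismatch, resync_mismatches is bumped by STRIPE_SECTORS either way.
 * With MD_RECOVERY_CHECK set (a "check" sync_action) the stripe is merely
 * flagged in-sync and left alone, whereas a repair pass schedules
 * STRIPE_OP_COMPUTE_BLK to regenerate the parity block and write it back.
 */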
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  struct r6_state *r6s, int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == r6s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!r6s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[r6s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[r6s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
				struct r6_state *r6s)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    (!r6s || j != sh2->qd_idx) &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);

		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */
2937 static void handle_stripe5(struct stripe_head
*sh
)
2939 raid5_conf_t
*conf
= sh
->raid_conf
;
2940 int disks
= sh
->disks
, i
;
2941 struct bio
*return_bi
= NULL
;
2942 struct stripe_head_state s
;
2944 mdk_rdev_t
*blocked_rdev
= NULL
;
2946 int dec_preread_active
= 0;
2948 memset(&s
, 0, sizeof(s
));
2949 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2950 "reconstruct:%d\n", (unsigned long long)sh
->sector
, sh
->state
,
2951 atomic_read(&sh
->count
), sh
->pd_idx
, sh
->check_state
,
2952 sh
->reconstruct_state
);
2954 spin_lock(&sh
->lock
);
2955 clear_bit(STRIPE_HANDLE
, &sh
->state
);
2956 clear_bit(STRIPE_DELAYED
, &sh
->state
);
2958 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
2959 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2960 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
2962 /* Now to look around and see what can be done */
2964 for (i
=disks
; i
--; ) {
2968 clear_bit(R5_Insync
, &dev
->flags
);
2970 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2971 "written %p\n", i
, dev
->flags
, dev
->toread
, dev
->read
,
2972 dev
->towrite
, dev
->written
);
2974 /* maybe we can request a biofill operation
2976 * new wantfill requests are only permitted while
2977 * ops_complete_biofill is guaranteed to be inactive
2979 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
2980 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
2981 set_bit(R5_Wantfill
, &dev
->flags
);
2983 /* now count some things */
2984 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
2985 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
2986 if (test_bit(R5_Wantcompute
, &dev
->flags
)) s
.compute
++;
2988 if (test_bit(R5_Wantfill
, &dev
->flags
))
2990 else if (dev
->toread
)
2994 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
2999 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3000 if (blocked_rdev
== NULL
&&
3001 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3002 blocked_rdev
= rdev
;
3003 atomic_inc(&rdev
->nr_pending
);
3005 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3006 /* The ReadError flag will just be confusing now */
3007 clear_bit(R5_ReadError
, &dev
->flags
);
3008 clear_bit(R5_ReWrite
, &dev
->flags
);
3010 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3011 || test_bit(R5_ReadError
, &dev
->flags
)) {
3015 set_bit(R5_Insync
, &dev
->flags
);
3019 if (unlikely(blocked_rdev
)) {
3020 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3021 s
.to_write
|| s
.written
) {
3022 set_bit(STRIPE_HANDLE
, &sh
->state
);
3025 /* There is nothing for the blocked_rdev to block */
3026 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
3027 blocked_rdev
= NULL
;
3030 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3031 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3032 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3035 pr_debug("locked=%d uptodate=%d to_read=%d"
3036 " to_write=%d failed=%d failed_num=%d\n",
3037 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
,
3038 s
.failed
, s
.failed_num
);
3039 /* check if the array has lost two devices and, if so, some requests might
3042 if (s
.failed
> 1 && s
.to_read
+s
.to_write
+s
.written
)
3043 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3044 if (s
.failed
> 1 && s
.syncing
) {
3045 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3046 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3050 /* might be able to return some write requests if the parity block
3051 * is safe, or on a failed drive
3053 dev
= &sh
->dev
[sh
->pd_idx
];
3055 ((test_bit(R5_Insync
, &dev
->flags
) &&
3056 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3057 test_bit(R5_UPTODATE
, &dev
->flags
)) ||
3058 (s
.failed
== 1 && s
.failed_num
== sh
->pd_idx
)))
3059 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3061 /* Now we might consider reading some blocks, either to check/generate
3062 * parity, or to satisfy requests
3063 * or to load a block that is being partially written.
3065 if (s
.to_read
|| s
.non_overwrite
||
3066 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3067 handle_stripe_fill5(sh
, &s
, disks
);
3069 /* Now we check to see if any write operations have recently
3073 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
)
3075 if (sh
->reconstruct_state
== reconstruct_state_drain_result
||
3076 sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
) {
3077 sh
->reconstruct_state
= reconstruct_state_idle
;
3079 /* All the 'written' buffers and the parity block are ready to
3080 * be written back to disk
3082 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3083 for (i
= disks
; i
--; ) {
3085 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3086 (i
== sh
->pd_idx
|| dev
->written
)) {
3087 pr_debug("Writing block %d\n", i
);
3088 set_bit(R5_Wantwrite
, &dev
->flags
);
3091 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3092 (i
== sh
->pd_idx
&& s
.failed
== 0))
3093 set_bit(STRIPE_INSYNC
, &sh
->state
);
3096 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3097 dec_preread_active
= 1;
3100 /* Now to consider new write requests and what else, if anything
3101 * should be read. We do not handle new writes when:
3102 * 1/ A 'write' operation (copy+xor) is already in flight.
3103 * 2/ A 'check' operation is in flight, as it may clobber the parity
3106 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3107 handle_stripe_dirtying5(conf
, sh
, &s
, disks
);
3109 /* maybe we need to check and possibly fix the parity for this stripe
3110 * Any reads will already have been scheduled, so we just see if enough
3111 * data is available. The parity check is held off while parity
3112 * dependent operations are in flight.
3114 if (sh
->check_state
||
3115 (s
.syncing
&& s
.locked
== 0 &&
3116 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3117 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3118 handle_parity_checks5(conf
, sh
, &s
, disks
);
3120 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3121 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3122 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3125 /* If the failed drive is just a ReadError, then we might need to progress
3126 * the repair/check process
3128 if (s
.failed
== 1 && !conf
->mddev
->ro
&&
3129 test_bit(R5_ReadError
, &sh
->dev
[s
.failed_num
].flags
)
3130 && !test_bit(R5_LOCKED
, &sh
->dev
[s
.failed_num
].flags
)
3131 && test_bit(R5_UPTODATE
, &sh
->dev
[s
.failed_num
].flags
)
3133 dev
= &sh
->dev
[s
.failed_num
];
3134 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3135 set_bit(R5_Wantwrite
, &dev
->flags
);
3136 set_bit(R5_ReWrite
, &dev
->flags
);
3137 set_bit(R5_LOCKED
, &dev
->flags
);
3140 /* let's read it back */
3141 set_bit(R5_Wantread
, &dev
->flags
);
3142 set_bit(R5_LOCKED
, &dev
->flags
);
3147 /* Finish reconstruct operations initiated by the expansion process */
3148 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3149 struct stripe_head
*sh2
3150 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3151 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3152 /* sh cannot be written until sh2 has been read.
3153 * so arrange for sh to be delayed a little
3155 set_bit(STRIPE_DELAYED
, &sh
->state
);
3156 set_bit(STRIPE_HANDLE
, &sh
->state
);
3157 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3159 atomic_inc(&conf
->preread_active_stripes
);
3160 release_stripe(sh2
);
3164 release_stripe(sh2
);
3166 sh
->reconstruct_state
= reconstruct_state_idle
;
3167 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3168 for (i
= conf
->raid_disks
; i
--; ) {
3169 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3170 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3175 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3176 !sh
->reconstruct_state
) {
3177 /* Need to write out all blocks after computing parity */
3178 sh
->disks
= conf
->raid_disks
;
3179 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3180 schedule_reconstruction(sh
, &s
, 1, 1);
3181 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3182 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3183 atomic_dec(&conf
->reshape_stripes
);
3184 wake_up(&conf
->wait_for_overlap
);
3185 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3188 if (s
.expanding
&& s
.locked
== 0 &&
3189 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3190 handle_stripe_expansion(conf
, sh
, NULL
);
3193 spin_unlock(&sh
->lock
);
3195 /* wait for this device to become unblocked */
3196 if (unlikely(blocked_rdev
))
3197 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3200 raid_run_ops(sh
, s
.ops_request
);
3204 if (dec_preread_active
) {
3205 /* We delay this until after ops_run_io so that if make_request
3206 * is waiting on a barrier, it won't continue until the writes
3207 * have actually been submitted.
3209 atomic_dec(&conf
->preread_active_stripes
);
3210 if (atomic_read(&conf
->preread_active_stripes
) <
3212 md_wakeup_thread(conf
->mddev
->thread
);
3214 return_io(return_bi
);
3217 static void handle_stripe6(struct stripe_head
*sh
)
3219 raid5_conf_t
*conf
= sh
->raid_conf
;
3220 int disks
= sh
->disks
;
3221 struct bio
*return_bi
= NULL
;
3222 int i
, pd_idx
= sh
->pd_idx
, qd_idx
= sh
->qd_idx
;
3223 struct stripe_head_state s
;
3224 struct r6_state r6s
;
3225 struct r5dev
*dev
, *pdev
, *qdev
;
3226 mdk_rdev_t
*blocked_rdev
= NULL
;
3227 int dec_preread_active
= 0;
3229 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3230 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3231 (unsigned long long)sh
->sector
, sh
->state
,
3232 atomic_read(&sh
->count
), pd_idx
, qd_idx
,
3233 sh
->check_state
, sh
->reconstruct_state
);
3234 memset(&s
, 0, sizeof(s
));
3236 spin_lock(&sh
->lock
);
3237 clear_bit(STRIPE_HANDLE
, &sh
->state
);
3238 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3240 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
3241 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3242 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3243 /* Now to look around and see what can be done */
3246 for (i
=disks
; i
--; ) {
3249 clear_bit(R5_Insync
, &dev
->flags
);
3251 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3252 i
, dev
->flags
, dev
->toread
, dev
->towrite
, dev
->written
);
3253 /* maybe we can reply to a read
3255 * new wantfill requests are only permitted while
3256 * ops_complete_biofill is guaranteed to be inactive
3258 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
3259 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
3260 set_bit(R5_Wantfill
, &dev
->flags
);
3262 /* now count some things */
3263 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
3264 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
3265 if (test_bit(R5_Wantcompute
, &dev
->flags
)) {
3267 BUG_ON(s
.compute
> 2);
3270 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
3272 } else if (dev
->toread
)
3276 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
3281 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3282 if (blocked_rdev
== NULL
&&
3283 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3284 blocked_rdev
= rdev
;
3285 atomic_inc(&rdev
->nr_pending
);
3287 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3288 /* The ReadError flag will just be confusing now */
3289 clear_bit(R5_ReadError
, &dev
->flags
);
3290 clear_bit(R5_ReWrite
, &dev
->flags
);
3292 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3293 || test_bit(R5_ReadError
, &dev
->flags
)) {
3295 r6s
.failed_num
[s
.failed
] = i
;
3298 set_bit(R5_Insync
, &dev
->flags
);
3302 if (unlikely(blocked_rdev
)) {
3303 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3304 s
.to_write
|| s
.written
) {
3305 set_bit(STRIPE_HANDLE
, &sh
->state
);
3308 /* There is nothing for the blocked_rdev to block */
3309 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
3310 blocked_rdev
= NULL
;
3313 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3314 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3315 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3318 pr_debug("locked=%d uptodate=%d to_read=%d"
3319 " to_write=%d failed=%d failed_num=%d,%d\n",
3320 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
, s
.failed
,
3321 r6s
.failed_num
[0], r6s
.failed_num
[1]);
3322 /* check if the array has lost >2 devices and, if so, some requests
3323 * might need to be failed
3325 if (s
.failed
> 2 && s
.to_read
+s
.to_write
+s
.written
)
3326 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3327 if (s
.failed
> 2 && s
.syncing
) {
3328 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3329 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3334 * might be able to return some write requests if the parity blocks
3335 * are safe, or on a failed drive
3337 pdev
= &sh
->dev
[pd_idx
];
3338 r6s
.p_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == pd_idx
)
3339 || (s
.failed
>= 2 && r6s
.failed_num
[1] == pd_idx
);
3340 qdev
= &sh
->dev
[qd_idx
];
3341 r6s
.q_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == qd_idx
)
3342 || (s
.failed
>= 2 && r6s
.failed_num
[1] == qd_idx
);
3345 ( r6s
.p_failed
|| ((test_bit(R5_Insync
, &pdev
->flags
)
3346 && !test_bit(R5_LOCKED
, &pdev
->flags
)
3347 && test_bit(R5_UPTODATE
, &pdev
->flags
)))) &&
3348 ( r6s
.q_failed
|| ((test_bit(R5_Insync
, &qdev
->flags
)
3349 && !test_bit(R5_LOCKED
, &qdev
->flags
)
3350 && test_bit(R5_UPTODATE
, &qdev
->flags
)))))
3351 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3353 /* Now we might consider reading some blocks, either to check/generate
3354 * parity, or to satisfy requests
3355 * or to load a block that is being partially written.
3357 if (s
.to_read
|| s
.non_overwrite
|| (s
.to_write
&& s
.failed
) ||
3358 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3359 handle_stripe_fill6(sh
, &s
, &r6s
, disks
);
3361 /* Now we check to see if any write operations have recently
3364 if (sh
->reconstruct_state
== reconstruct_state_drain_result
) {
3366 sh
->reconstruct_state
= reconstruct_state_idle
;
3367 /* All the 'written' buffers and the parity blocks are ready to
3368 * be written back to disk
3370 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3371 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[qd_idx
].flags
));
3372 for (i
= disks
; i
--; ) {
3374 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3375 (i
== sh
->pd_idx
|| i
== qd_idx
||
3377 pr_debug("Writing block %d\n", i
);
3378 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
3379 set_bit(R5_Wantwrite
, &dev
->flags
);
3380 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3381 ((i
== sh
->pd_idx
|| i
== qd_idx
) &&
3383 set_bit(STRIPE_INSYNC
, &sh
->state
);
3386 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3387 dec_preread_active
= 1;
3390 /* Now to consider new write requests and what else, if anything
3391 * should be read. We do not handle new writes when:
3392 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
3393 * 2/ A 'check' operation is in flight, as it may clobber the parity
3396 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3397 handle_stripe_dirtying6(conf
, sh
, &s
, &r6s
, disks
);
3399 /* maybe we need to check and possibly fix the parity for this stripe
3400 * Any reads will already have been scheduled, so we just see if enough
3401 * data is available. The parity check is held off while parity
3402 * dependent operations are in flight.
3404 if (sh
->check_state
||
3405 (s
.syncing
&& s
.locked
== 0 &&
3406 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3407 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3408 handle_parity_checks6(conf
, sh
, &s
, &r6s
, disks
);
3410 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3411 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3412 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3415 /* If the failed drives are just a ReadError, then we might need
3416 * to progress the repair/check process
3418 if (s
.failed
<= 2 && !conf
->mddev
->ro
)
3419 for (i
= 0; i
< s
.failed
; i
++) {
3420 dev
= &sh
->dev
[r6s
.failed_num
[i
]];
3421 if (test_bit(R5_ReadError
, &dev
->flags
)
3422 && !test_bit(R5_LOCKED
, &dev
->flags
)
3423 && test_bit(R5_UPTODATE
, &dev
->flags
)
3425 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3426 set_bit(R5_Wantwrite
, &dev
->flags
);
3427 set_bit(R5_ReWrite
, &dev
->flags
);
3428 set_bit(R5_LOCKED
, &dev
->flags
);
3431 /* let's read it back */
3432 set_bit(R5_Wantread
, &dev
->flags
);
3433 set_bit(R5_LOCKED
, &dev
->flags
);
3439 /* Finish reconstruct operations initiated by the expansion process */
3440 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3441 sh
->reconstruct_state
= reconstruct_state_idle
;
3442 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3443 for (i
= conf
->raid_disks
; i
--; ) {
3444 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3445 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3450 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3451 !sh
->reconstruct_state
) {
3452 struct stripe_head
*sh2
3453 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3454 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3455 /* sh cannot be written until sh2 has been read.
3456 * so arrange for sh to be delayed a little
3458 set_bit(STRIPE_DELAYED
, &sh
->state
);
3459 set_bit(STRIPE_HANDLE
, &sh
->state
);
3460 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3462 atomic_inc(&conf
->preread_active_stripes
);
3463 release_stripe(sh2
);
3467 release_stripe(sh2
);
3469 /* Need to write out all blocks after computing P&Q */
3470 sh
->disks
= conf
->raid_disks
;
3471 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3472 schedule_reconstruction(sh
, &s
, 1, 1);
3473 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3474 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3475 atomic_dec(&conf
->reshape_stripes
);
3476 wake_up(&conf
->wait_for_overlap
);
3477 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3480 if (s
.expanding
&& s
.locked
== 0 &&
3481 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3482 handle_stripe_expansion(conf
, sh
, &r6s
);
3485 spin_unlock(&sh
->lock
);
3487 /* wait for this device to become unblocked */
3488 if (unlikely(blocked_rdev
))
3489 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3492 raid_run_ops(sh
, s
.ops_request
);
3497 if (dec_preread_active
) {
3498 /* We delay this until after ops_run_io so that if make_request
3499 * is waiting on a barrier, it won't continue until the writes
3500 * have actually been submitted.
3502 atomic_dec(&conf
->preread_active_stripes
);
3503 if (atomic_read(&conf
->preread_active_stripes
) <
3505 md_wakeup_thread(conf
->mddev
->thread
);
3508 return_io(return_bi
);
static void handle_stripe(struct stripe_head *sh)
{
	if (sh->raid_conf->level == 6)
		handle_stripe6(sh);
	else
		handle_stripe5(sh);
}
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	} else
		blk_plug_device(conf->mddev->queue);
}
static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
static void unplug_slaves(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	int i;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	rcu_read_lock();
	for (i = 0; i < devs; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
static void raid5_unplug_device(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}
static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid5_conf_t *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (mddev_congested(mddev, bits))
		return 1;
	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
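/*
 * Worked example for the merge calculation above (hypothetical 64KiB
 * chunk, i.e. chunk_sectors = 128): for a read starting at sector 120
 * that already contains 4 sectors, max = (128 - ((120 & 127) + 4)) << 9
 * = 4 << 9 = 2048 bytes, so at most 2048 more bytes may be merged before
 * the bio would cross a chunk boundary.  Writes are not limited here
 * because they are split per stripe in make_request() anyway.
 */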
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
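/*
 * Example for in_chunk_boundary() (hypothetical chunk_sectors = 128):
 * a bio starting at sector 250 with 8 sectors gives
 * (250 & 127) + 8 = 130 > 128, so it straddles a chunk boundary and the
 * aligned-read fast path is skipped; the same bio starting at sector 240
 * gives 112 + 8 = 120 <= 128 and may be sent directly to one member disk.
 */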
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 16 bits)
		 */
		bi->bi_phys_segments = 1; /* biased count of active stripes */
	}

	return bi;
}
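/*
 * The initial value of 1 is the submitter's bias: it keeps the bio from
 * being completed while the retry path is still attaching it to stripes.
 * Only once every stripe has finished does the final
 * raid5_dec_bi_phys_segments() call return zero and the bio get ended.
 */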
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio* raid_bi  = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;

	bio_put(bi);

	mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
	conf = mddev->private;
	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}
static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_phys_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}
static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	struct bio* align_bi;
	mdk_rdev_t *rdev;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone to make a copy of the bio
	 */
	align_bi = bio_clone(raid_bio, GFP_NOIO);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *   original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
						    0,
						    &dd_idx, NULL);

	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev =  rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

		if (!bio_fits_rdev(align_bi)) {
			/* too big in some way */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
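/*
 * Example of the bypass accounting above (illustrative): each time a
 * stripe from handle_list is processed while an older preread stripe
 * still sits at the head of hold_list, bypass_count is incremented.
 * Once handle_list drains, a hold_list stripe is only released if
 * bypass_count has exceeded bypass_threshold or no full-stripe writes
 * are pending; otherwise the hold_list keeps waiting in the hope that
 * more full-stripe writes arrive to fill those stripes out.
 */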
3868 static int make_request(struct request_queue
*q
, struct bio
* bi
)
3870 mddev_t
*mddev
= q
->queuedata
;
3871 raid5_conf_t
*conf
= mddev
->private;
3873 sector_t new_sector
;
3874 sector_t logical_sector
, last_sector
;
3875 struct stripe_head
*sh
;
3876 const int rw
= bio_data_dir(bi
);
3879 if (unlikely(bio_rw_flagged(bi
, BIO_RW_BARRIER
))) {
3880 /* Drain all pending writes. We only really need
3881 * to ensure they have been submitted, but this is
3884 mddev
->pers
->quiesce(mddev
, 1);
3885 mddev
->pers
->quiesce(mddev
, 0);
3886 md_barrier_request(mddev
, bi
);
3890 md_write_start(mddev
, bi
);
3892 cpu
= part_stat_lock();
3893 part_stat_inc(cpu
, &mddev
->gendisk
->part0
, ios
[rw
]);
3894 part_stat_add(cpu
, &mddev
->gendisk
->part0
, sectors
[rw
],
3899 mddev
->reshape_position
== MaxSector
&&
3900 chunk_aligned_read(q
,bi
))
3903 logical_sector
= bi
->bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
3904 last_sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
3906 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
3908 for (;logical_sector
< last_sector
; logical_sector
+= STRIPE_SECTORS
) {
3910 int disks
, data_disks
;
3915 disks
= conf
->raid_disks
;
3916 prepare_to_wait(&conf
->wait_for_overlap
, &w
, TASK_UNINTERRUPTIBLE
);
3917 if (unlikely(conf
->reshape_progress
!= MaxSector
)) {
3918 /* spinlock is needed as reshape_progress may be
3919 * 64bit on a 32bit platform, and so it might be
3920 * possible to see a half-updated value
3921 * Ofcourse reshape_progress could change after
3922 * the lock is dropped, so once we get a reference
3923 * to the stripe that we think it is, we will have
3926 spin_lock_irq(&conf
->device_lock
);
3927 if (mddev
->delta_disks
< 0
3928 ? logical_sector
< conf
->reshape_progress
3929 : logical_sector
>= conf
->reshape_progress
) {
3930 disks
= conf
->previous_raid_disks
;
3933 if (mddev
->delta_disks
< 0
3934 ? logical_sector
< conf
->reshape_safe
3935 : logical_sector
>= conf
->reshape_safe
) {
3936 spin_unlock_irq(&conf
->device_lock
);
3941 spin_unlock_irq(&conf
->device_lock
);
3943 data_disks
= disks
- conf
->max_degraded
;
3945 new_sector
= raid5_compute_sector(conf
, logical_sector
,
3948 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3949 (unsigned long long)new_sector
,
3950 (unsigned long long)logical_sector
);
3952 sh
= get_active_stripe(conf
, new_sector
, previous
,
3953 (bi
->bi_rw
&RWA_MASK
), 0);
3955 if (unlikely(previous
)) {
3956 /* expansion might have moved on while waiting for a
3957 * stripe, so we must do the range check again.
3958 * Expansion could still move past after this
3959 * test, but as we are holding a reference to
3960 * 'sh', we know that if that happens,
3961 * STRIPE_EXPANDING will get set and the expansion
3962 * won't proceed until we finish with the stripe.
3965 spin_lock_irq(&conf
->device_lock
);
3966 if (mddev
->delta_disks
< 0
3967 ? logical_sector
>= conf
->reshape_progress
3968 : logical_sector
< conf
->reshape_progress
)
3969 /* mismatch, need to try again */
3971 spin_unlock_irq(&conf
->device_lock
);
3979 if (bio_data_dir(bi
) == WRITE
&&
3980 logical_sector
>= mddev
->suspend_lo
&&
3981 logical_sector
< mddev
->suspend_hi
) {
3983 /* As the suspend_* range is controlled by
3984 * userspace, we want an interruptible
3987 flush_signals(current
);
3988 prepare_to_wait(&conf
->wait_for_overlap
,
3989 &w
, TASK_INTERRUPTIBLE
);
3990 if (logical_sector
>= mddev
->suspend_lo
&&
3991 logical_sector
< mddev
->suspend_hi
)
3996 if (test_bit(STRIPE_EXPANDING
, &sh
->state
) ||
3997 !add_stripe_bio(sh
, bi
, dd_idx
, (bi
->bi_rw
&RW_MASK
))) {
3998 /* Stripe is busy expanding or
3999 * add failed due to overlap. Flush everything
4002 raid5_unplug_device(mddev
->queue
);
4007 finish_wait(&conf
->wait_for_overlap
, &w
);
4008 set_bit(STRIPE_HANDLE
, &sh
->state
);
4009 clear_bit(STRIPE_DELAYED
, &sh
->state
);
4010 if (mddev
->barrier
&&
4011 !test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
4012 atomic_inc(&conf
->preread_active_stripes
);
4015 /* cannot get stripe for read-ahead, just give-up */
4016 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
4017 finish_wait(&conf
->wait_for_overlap
, &w
);
4022 spin_lock_irq(&conf
->device_lock
);
4023 remaining
= raid5_dec_bi_phys_segments(bi
);
4024 spin_unlock_irq(&conf
->device_lock
);
4025 if (remaining
== 0) {
4028 md_write_end(mddev
);
4033 if (mddev
->barrier
) {
4034 /* We need to wait for the stripes to all be handled.
4035 * So: wait for preread_active_stripes to drop to 0.
4037 wait_event(mddev
->thread
->wqueue
,
4038 atomic_read(&conf
->preread_active_stripes
) == 0);
static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);

static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->delta_disks < 0 &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (mddev->delta_disks >= 0 &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);

		mddev->curr_resync_completed = sector_nr;
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}

	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these.
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}

	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 * been reshaped.
	 * If 'readpos' is behind 'writepos', then there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
	 */
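	/* Worked example (illustrative numbers only, not taken from any
	 * real array): suppose we are growing from 3 to 5 data disks with
	 * reshape_sectors = 128, conf->reshape_progress = 3000 and
	 * conf->reshape_safe = 1500.  Then writepos = 3000/5 + 128 = 728,
	 * readpos = 3000/3 - 128 = 872 and safepos = 1500/3 - 128 = 372.
	 * safepos (372) is behind writepos (728) while readpos (872) is
	 * beyond it, so the branch below forces a metadata update before
	 * any of this chunk is written.
	 */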
	if ((mddev->delta_disks < 0
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = mddev->curr_resync;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}

	if (mddev->delta_disks < 0) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}
	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j = sh->disks; j--; ) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->delta_disks < 0)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += reshape_sectors;
	if ((sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	return reshape_sectors;
}
/* FIXME go_faster isn't used */
static inline sector_t
sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	sector_t sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}
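	/* Illustrative example (assumed numbers): if the bitmap reports
	 * that the next 1027 sectors are already in sync and
	 * STRIPE_SECTORS is 8, the code above returns 1027/8*8 = 1024,
	 * i.e. the skip is always rounded down to whole stripes and the
	 * leftover 3 sectors are revisited on the next call.
	 */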
	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
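	/* Note: if we run out of stripe_heads part way through, the count
	 * of completed stripes is saved with raid5_set_bi_hw_segments()
	 * and the bio is parked on conf->retry_read_aligned, so a later
	 * call resumes from the recorded stripe instead of restarting
	 * from the beginning of the bio.
	 */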
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {

		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */
			continue;

		sh = get_active_stripe(conf, sector, 0, 1, 0);
		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		handle_stripe(sh);
		release_stripe(sh);
		handled++;
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev->private;
	struct bio *bio;
	int handled = 0;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	spin_lock_irq(&conf->device_lock);
	while (1) {
		if (conf->seq_flush != conf->seq_write) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		sh = __get_priority_stripe(conf);
		if (!sh)
			break;
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	unplug_slaves(mddev);

	pr_debug("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
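/* For example (assuming the usual md sysfs layout), the stripe cache of
 * /dev/md0 can be inspected and enlarged from user space with:
 *
 *	cat  /sys/block/md0/md/stripe_cache_size
 *	echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * Each stripe_head caches one page per member device, so raising this
 * value trades memory for fewer read-modify-write stalls.
 */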
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->bypass_threshold);
	else
		return 0;
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	raid5_conf_t *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
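/* Worked example (hypothetical array): five 1000005-sector devices in
 * RAID5 (max_degraded = 1) with 128-sector chunks.  Rounding each
 * device down to a chunk multiple gives 999936 sectors, so the array
 * exports 999936 * (5 - 1) = 3999744 usable sectors.
 */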
4605 static void raid5_free_percpu(raid5_conf_t
*conf
)
4607 struct raid5_percpu
*percpu
;
4614 for_each_possible_cpu(cpu
) {
4615 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
4616 safe_put_page(percpu
->spare_page
);
4617 kfree(percpu
->scribble
);
4619 #ifdef CONFIG_HOTPLUG_CPU
4620 unregister_cpu_notifier(&conf
->cpu_notify
);
4624 free_percpu(conf
->percpu
);
4627 static void free_conf(raid5_conf_t
*conf
)
4629 shrink_stripes(conf
);
4630 raid5_free_percpu(conf
);
4632 kfree(conf
->stripe_hashtbl
);
4636 #ifdef CONFIG_HOTPLUG_CPU
4637 static int raid456_cpu_notify(struct notifier_block
*nfb
, unsigned long action
,
4640 raid5_conf_t
*conf
= container_of(nfb
, raid5_conf_t
, cpu_notify
);
4641 long cpu
= (long)hcpu
;
4642 struct raid5_percpu
*percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
4645 case CPU_UP_PREPARE
:
4646 case CPU_UP_PREPARE_FROZEN
:
4647 if (conf
->level
== 6 && !percpu
->spare_page
)
4648 percpu
->spare_page
= alloc_page(GFP_KERNEL
);
4649 if (!percpu
->scribble
)
4650 percpu
->scribble
= kmalloc(conf
->scribble_len
, GFP_KERNEL
);
4652 if (!percpu
->scribble
||
4653 (conf
->level
== 6 && !percpu
->spare_page
)) {
4654 safe_put_page(percpu
->spare_page
);
4655 kfree(percpu
->scribble
);
4656 pr_err("%s: failed memory allocation for cpu%ld\n",
4662 case CPU_DEAD_FROZEN
:
4663 safe_put_page(percpu
->spare_page
);
4664 kfree(percpu
->scribble
);
4665 percpu
->spare_page
= NULL
;
4666 percpu
->scribble
= NULL
;
4675 static int raid5_alloc_percpu(raid5_conf_t
*conf
)
4678 struct page
*spare_page
;
4679 struct raid5_percpu
*allcpus
;
4683 allcpus
= alloc_percpu(struct raid5_percpu
);
4686 conf
->percpu
= allcpus
;
4690 for_each_present_cpu(cpu
) {
4691 if (conf
->level
== 6) {
4692 spare_page
= alloc_page(GFP_KERNEL
);
4697 per_cpu_ptr(conf
->percpu
, cpu
)->spare_page
= spare_page
;
4699 scribble
= kmalloc(conf
->scribble_len
, GFP_KERNEL
);
4704 per_cpu_ptr(conf
->percpu
, cpu
)->scribble
= scribble
;
4706 #ifdef CONFIG_HOTPLUG_CPU
4707 conf
->cpu_notify
.notifier_call
= raid456_cpu_notify
;
4708 conf
->cpu_notify
.priority
= 0;
4710 err
= register_cpu_notifier(&conf
->cpu_notify
);
4717 static raid5_conf_t
*setup_conf(mddev_t
*mddev
)
4720 int raid_disk
, memory
, max_disks
;
4722 struct disk_info
*disk
;
4724 if (mddev
->new_level
!= 5
4725 && mddev
->new_level
!= 4
4726 && mddev
->new_level
!= 6) {
4727 printk(KERN_ERR
"raid5: %s: raid level not set to 4/5/6 (%d)\n",
4728 mdname(mddev
), mddev
->new_level
);
4729 return ERR_PTR(-EIO
);
4731 if ((mddev
->new_level
== 5
4732 && !algorithm_valid_raid5(mddev
->new_layout
)) ||
4733 (mddev
->new_level
== 6
4734 && !algorithm_valid_raid6(mddev
->new_layout
))) {
4735 printk(KERN_ERR
"raid5: %s: layout %d not supported\n",
4736 mdname(mddev
), mddev
->new_layout
);
4737 return ERR_PTR(-EIO
);
4739 if (mddev
->new_level
== 6 && mddev
->raid_disks
< 4) {
4740 printk(KERN_ERR
"raid6: not enough configured devices for %s (%d, minimum 4)\n",
4741 mdname(mddev
), mddev
->raid_disks
);
4742 return ERR_PTR(-EINVAL
);
4745 if (!mddev
->new_chunk_sectors
||
4746 (mddev
->new_chunk_sectors
<< 9) % PAGE_SIZE
||
4747 !is_power_of_2(mddev
->new_chunk_sectors
)) {
4748 printk(KERN_ERR
"raid5: invalid chunk size %d for %s\n",
4749 mddev
->new_chunk_sectors
<< 9, mdname(mddev
));
4750 return ERR_PTR(-EINVAL
);
4753 conf
= kzalloc(sizeof(raid5_conf_t
), GFP_KERNEL
);
4756 spin_lock_init(&conf
->device_lock
);
4757 init_waitqueue_head(&conf
->wait_for_stripe
);
4758 init_waitqueue_head(&conf
->wait_for_overlap
);
4759 INIT_LIST_HEAD(&conf
->handle_list
);
4760 INIT_LIST_HEAD(&conf
->hold_list
);
4761 INIT_LIST_HEAD(&conf
->delayed_list
);
4762 INIT_LIST_HEAD(&conf
->bitmap_list
);
4763 INIT_LIST_HEAD(&conf
->inactive_list
);
4764 atomic_set(&conf
->active_stripes
, 0);
4765 atomic_set(&conf
->preread_active_stripes
, 0);
4766 atomic_set(&conf
->active_aligned_reads
, 0);
4767 conf
->bypass_threshold
= BYPASS_THRESHOLD
;
4769 conf
->raid_disks
= mddev
->raid_disks
;
4770 if (mddev
->reshape_position
== MaxSector
)
4771 conf
->previous_raid_disks
= mddev
->raid_disks
;
4773 conf
->previous_raid_disks
= mddev
->raid_disks
- mddev
->delta_disks
;
4774 max_disks
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
4775 conf
->scribble_len
= scribble_len(max_disks
);
4777 conf
->disks
= kzalloc(max_disks
* sizeof(struct disk_info
),
4782 conf
->mddev
= mddev
;
4784 if ((conf
->stripe_hashtbl
= kzalloc(PAGE_SIZE
, GFP_KERNEL
)) == NULL
)
4787 conf
->level
= mddev
->new_level
;
4788 if (raid5_alloc_percpu(conf
) != 0)
4791 pr_debug("raid5: run(%s) called.\n", mdname(mddev
));
4793 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
4794 raid_disk
= rdev
->raid_disk
;
4795 if (raid_disk
>= max_disks
4798 disk
= conf
->disks
+ raid_disk
;
4802 if (test_bit(In_sync
, &rdev
->flags
)) {
4803 char b
[BDEVNAME_SIZE
];
4804 printk(KERN_INFO
"raid5: device %s operational as raid"
4805 " disk %d\n", bdevname(rdev
->bdev
,b
),
4808 /* Cannot rely on bitmap to complete recovery */
4812 conf
->chunk_sectors
= mddev
->new_chunk_sectors
;
4813 conf
->level
= mddev
->new_level
;
4814 if (conf
->level
== 6)
4815 conf
->max_degraded
= 2;
4817 conf
->max_degraded
= 1;
4818 conf
->algorithm
= mddev
->new_layout
;
4819 conf
->max_nr_stripes
= NR_STRIPES
;
4820 conf
->reshape_progress
= mddev
->reshape_position
;
4821 if (conf
->reshape_progress
!= MaxSector
) {
4822 conf
->prev_chunk_sectors
= mddev
->chunk_sectors
;
4823 conf
->prev_algo
= mddev
->layout
;
4826 memory
= conf
->max_nr_stripes
* (sizeof(struct stripe_head
) +
4827 max_disks
* ((sizeof(struct bio
) + PAGE_SIZE
))) / 1024;
4828 if (grow_stripes(conf
, conf
->max_nr_stripes
)) {
4830 "raid5: couldn't allocate %dkB for buffers\n", memory
);
4833 printk(KERN_INFO
"raid5: allocated %dkB for %s\n",
4834 memory
, mdname(mddev
));
4836 conf
->thread
= md_register_thread(raid5d
, mddev
, NULL
);
4837 if (!conf
->thread
) {
4839 "raid5: couldn't allocate thread for %s\n",
4849 return ERR_PTR(-EIO
);
4851 return ERR_PTR(-ENOMEM
);
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
		break;
	}
	return 0;
}
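/* For example, on a 5-device RAID5 (max_degraded = 1) using
 * ALGORITHM_PARITY_0, disk 0 holds only parity and never data, so a
 * not-yet-recovered disk 0 need not count towards the dirty-parity
 * accounting in run() below; with ALGORITHM_PARITY_N the same holds
 * for the last disk instead.
 */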
4881 static int run(mddev_t
*mddev
)
4884 int working_disks
= 0, chunk_size
;
4885 int dirty_parity_disks
= 0;
4887 sector_t reshape_offset
= 0;
4889 if (mddev
->recovery_cp
!= MaxSector
)
4890 printk(KERN_NOTICE
"raid5: %s is not clean"
4891 " -- starting background reconstruction\n",
4893 if (mddev
->reshape_position
!= MaxSector
) {
4894 /* Check that we can continue the reshape.
4895 * Currently only disks can change, it must
4896 * increase, and we must be past the point where
4897 * a stripe over-writes itself
4899 sector_t here_new
, here_old
;
4901 int max_degraded
= (mddev
->level
== 6 ? 2 : 1);
4903 if (mddev
->new_level
!= mddev
->level
) {
4904 printk(KERN_ERR
"raid5: %s: unsupported reshape "
4905 "required - aborting.\n",
4909 old_disks
= mddev
->raid_disks
- mddev
->delta_disks
;
4910 /* reshape_position must be on a new-stripe boundary, and one
4911 * further up in new geometry must map after here in old
4914 here_new
= mddev
->reshape_position
;
4915 if (sector_div(here_new
, mddev
->new_chunk_sectors
*
4916 (mddev
->raid_disks
- max_degraded
))) {
4917 printk(KERN_ERR
"raid5: reshape_position not "
4918 "on a stripe boundary\n");
4921 reshape_offset
= here_new
* mddev
->new_chunk_sectors
;
4922 /* here_new is the stripe we will write to */
4923 here_old
= mddev
->reshape_position
;
4924 sector_div(here_old
, mddev
->chunk_sectors
*
4925 (old_disks
-max_degraded
));
4926 /* here_old is the first stripe that we might need to read
4928 if (mddev
->delta_disks
== 0) {
4929 /* We cannot be sure it is safe to start an in-place
4930 * reshape. It is only safe if user-space if monitoring
4931 * and taking constant backups.
4932 * mdadm always starts a situation like this in
4933 * readonly mode so it can take control before
4934 * allowing any writes. So just check for that.
4936 if ((here_new
* mddev
->new_chunk_sectors
!=
4937 here_old
* mddev
->chunk_sectors
) ||
4939 printk(KERN_ERR
"raid5: in-place reshape must be started"
4940 " in read-only mode - aborting\n");
4943 } else if (mddev
->delta_disks
< 0
4944 ? (here_new
* mddev
->new_chunk_sectors
<=
4945 here_old
* mddev
->chunk_sectors
)
4946 : (here_new
* mddev
->new_chunk_sectors
>=
4947 here_old
* mddev
->chunk_sectors
)) {
4948 /* Reading from the same stripe as writing to - bad */
4949 printk(KERN_ERR
"raid5: reshape_position too early for "
4950 "auto-recovery - aborting.\n");
4953 printk(KERN_INFO
"raid5: reshape will continue\n");
4954 /* OK, we should be able to continue; */
4956 BUG_ON(mddev
->level
!= mddev
->new_level
);
4957 BUG_ON(mddev
->layout
!= mddev
->new_layout
);
4958 BUG_ON(mddev
->chunk_sectors
!= mddev
->new_chunk_sectors
);
4959 BUG_ON(mddev
->delta_disks
!= 0);
4962 if (mddev
->private == NULL
)
4963 conf
= setup_conf(mddev
);
4965 conf
= mddev
->private;
4968 return PTR_ERR(conf
);
4970 mddev
->thread
= conf
->thread
;
4971 conf
->thread
= NULL
;
4972 mddev
->private = conf
;
4975 * 0 for a fully functional array, 1 or 2 for a degraded array.
4977 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
4978 if (rdev
->raid_disk
< 0)
4980 if (test_bit(In_sync
, &rdev
->flags
))
4982 /* This disc is not fully in-sync. However if it
4983 * just stored parity (beyond the recovery_offset),
4984 * when we don't need to be concerned about the
4985 * array being dirty.
4986 * When reshape goes 'backwards', we never have
4987 * partially completed devices, so we only need
4988 * to worry about reshape going forwards.
4990 /* Hack because v0.91 doesn't store recovery_offset properly. */
4991 if (mddev
->major_version
== 0 &&
4992 mddev
->minor_version
> 90)
4993 rdev
->recovery_offset
= reshape_offset
;
4995 printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
4996 rdev
->raid_disk
, working_disks
, conf
->prev_algo
,
4997 conf
->previous_raid_disks
, conf
->max_degraded
,
4998 conf
->algorithm
, conf
->raid_disks
,
4999 only_parity(rdev
->raid_disk
,
5001 conf
->previous_raid_disks
,
5002 conf
->max_degraded
),
5003 only_parity(rdev
->raid_disk
,
5006 conf
->max_degraded
));
5007 if (rdev
->recovery_offset
< reshape_offset
) {
5008 /* We need to check old and new layout */
5009 if (!only_parity(rdev
->raid_disk
,
5012 conf
->max_degraded
))
5015 if (!only_parity(rdev
->raid_disk
,
5017 conf
->previous_raid_disks
,
5018 conf
->max_degraded
))
5020 dirty_parity_disks
++;
5023 mddev
->degraded
= (max(conf
->raid_disks
, conf
->previous_raid_disks
)
5026 if (mddev
->degraded
> conf
->max_degraded
) {
5027 printk(KERN_ERR
"raid5: not enough operational devices for %s"
5028 " (%d/%d failed)\n",
5029 mdname(mddev
), mddev
->degraded
, conf
->raid_disks
);
5033 /* device size must be a multiple of chunk size */
5034 mddev
->dev_sectors
&= ~(mddev
->chunk_sectors
- 1);
5035 mddev
->resync_max_sectors
= mddev
->dev_sectors
;
5037 if (mddev
->degraded
> dirty_parity_disks
&&
5038 mddev
->recovery_cp
!= MaxSector
) {
5039 if (mddev
->ok_start_degraded
)
5041 "raid5: starting dirty degraded array: %s"
5042 "- data corruption possible.\n",
5046 "raid5: cannot start dirty degraded array for %s\n",
5052 if (mddev
->degraded
== 0)
5053 printk("raid5: raid level %d set %s active with %d out of %d"
5054 " devices, algorithm %d\n", conf
->level
, mdname(mddev
),
5055 mddev
->raid_disks
-mddev
->degraded
, mddev
->raid_disks
,
5058 printk(KERN_ALERT
"raid5: raid level %d set %s active with %d"
5059 " out of %d devices, algorithm %d\n", conf
->level
,
5060 mdname(mddev
), mddev
->raid_disks
- mddev
->degraded
,
5061 mddev
->raid_disks
, mddev
->new_layout
);
5063 print_raid5_conf(conf
);
5065 if (conf
->reshape_progress
!= MaxSector
) {
5066 printk("...ok start reshape thread\n");
5067 conf
->reshape_safe
= conf
->reshape_progress
;
5068 atomic_set(&conf
->reshape_stripes
, 0);
5069 clear_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
);
5070 clear_bit(MD_RECOVERY_CHECK
, &mddev
->recovery
);
5071 set_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
);
5072 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
5073 mddev
->sync_thread
= md_register_thread(md_do_sync
, mddev
,
5077 /* read-ahead size must cover two whole stripes, which is
5078 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
5081 int data_disks
= conf
->previous_raid_disks
- conf
->max_degraded
;
5082 int stripe
= data_disks
*
5083 ((mddev
->chunk_sectors
<< 9) / PAGE_SIZE
);
5084 if (mddev
->queue
->backing_dev_info
.ra_pages
< 2 * stripe
)
5085 mddev
->queue
->backing_dev_info
.ra_pages
= 2 * stripe
;
5088 /* Ok, everything is just fine now */
5089 if (mddev
->to_remove
== &raid5_attrs_group
)
5090 mddev
->to_remove
= NULL
;
5091 else if (sysfs_create_group(&mddev
->kobj
, &raid5_attrs_group
))
5093 "raid5: failed to create sysfs attributes for %s\n",
5096 mddev
->queue
->queue_lock
= &conf
->device_lock
;
5098 mddev
->queue
->unplug_fn
= raid5_unplug_device
;
5099 mddev
->queue
->backing_dev_info
.congested_data
= mddev
;
5100 mddev
->queue
->backing_dev_info
.congested_fn
= raid5_congested
;
5102 md_set_array_sectors(mddev
, raid5_size(mddev
, 0, 0));
5104 blk_queue_merge_bvec(mddev
->queue
, raid5_mergeable_bvec
);
5105 chunk_size
= mddev
->chunk_sectors
<< 9;
5106 blk_queue_io_min(mddev
->queue
, chunk_size
);
5107 blk_queue_io_opt(mddev
->queue
, chunk_size
*
5108 (conf
->raid_disks
- conf
->max_degraded
));
5110 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
5111 disk_stack_limits(mddev
->gendisk
, rdev
->bdev
,
5112 rdev
->data_offset
<< 9);
5116 md_unregister_thread(mddev
->thread
);
5117 mddev
->thread
= NULL
;
5119 print_raid5_conf(conf
);
5122 mddev
->private = NULL
;
5123 printk(KERN_ALERT
"raid5: failed to run raid set %s\n", mdname(mddev
));
5129 static int stop(mddev_t
*mddev
)
5131 raid5_conf_t
*conf
= (raid5_conf_t
*) mddev
->private;
5133 md_unregister_thread(mddev
->thread
);
5134 mddev
->thread
= NULL
;
5135 mddev
->queue
->backing_dev_info
.congested_fn
= NULL
;
5136 blk_sync_queue(mddev
->queue
); /* the unplug fn references 'conf'*/
5138 mddev
->private = NULL
;
5139 mddev
->to_remove
= &raid5_attrs_group
;
5144 static void print_sh(struct seq_file
*seq
, struct stripe_head
*sh
)
5148 seq_printf(seq
, "sh %llu, pd_idx %d, state %ld.\n",
5149 (unsigned long long)sh
->sector
, sh
->pd_idx
, sh
->state
);
5150 seq_printf(seq
, "sh %llu, count %d.\n",
5151 (unsigned long long)sh
->sector
, atomic_read(&sh
->count
));
5152 seq_printf(seq
, "sh %llu, ", (unsigned long long)sh
->sector
);
5153 for (i
= 0; i
< sh
->disks
; i
++) {
5154 seq_printf(seq
, "(cache%d: %p %ld) ",
5155 i
, sh
->dev
[i
].page
, sh
->dev
[i
].flags
);
5157 seq_printf(seq
, "\n");
5160 static void printall(struct seq_file
*seq
, raid5_conf_t
*conf
)
5162 struct stripe_head
*sh
;
5163 struct hlist_node
*hn
;
5166 spin_lock_irq(&conf
->device_lock
);
5167 for (i
= 0; i
< NR_HASH
; i
++) {
5168 hlist_for_each_entry(sh
, hn
, &conf
->stripe_hashtbl
[i
], hash
) {
5169 if (sh
->raid_conf
!= conf
)
5174 spin_unlock_irq(&conf
->device_lock
);
5178 static void status(struct seq_file
*seq
, mddev_t
*mddev
)
5180 raid5_conf_t
*conf
= (raid5_conf_t
*) mddev
->private;
5183 seq_printf(seq
, " level %d, %dk chunk, algorithm %d", mddev
->level
,
5184 mddev
->chunk_sectors
/ 2, mddev
->layout
);
5185 seq_printf (seq
, " [%d/%d] [", conf
->raid_disks
, conf
->raid_disks
- mddev
->degraded
);
5186 for (i
= 0; i
< conf
->raid_disks
; i
++)
5187 seq_printf (seq
, "%s",
5188 conf
->disks
[i
].rdev
&&
5189 test_bit(In_sync
, &conf
->disks
[i
].rdev
->flags
) ? "U" : "_");
5190 seq_printf (seq
, "]");
5192 seq_printf (seq
, "\n");
5193 printall(seq
, conf
);
5197 static void print_raid5_conf (raid5_conf_t
*conf
)
5200 struct disk_info
*tmp
;
5202 printk("RAID5 conf printout:\n");
5204 printk("(conf==NULL)\n");
5207 printk(" --- rd:%d wd:%d\n", conf
->raid_disks
,
5208 conf
->raid_disks
- conf
->mddev
->degraded
);
5210 for (i
= 0; i
< conf
->raid_disks
; i
++) {
5211 char b
[BDEVNAME_SIZE
];
5212 tmp
= conf
->disks
+ i
;
5214 printk(" disk %d, o:%d, dev:%s\n",
5215 i
, !test_bit(Faulty
, &tmp
->rdev
->flags
),
5216 bdevname(tmp
->rdev
->bdev
,b
));
5220 static int raid5_spare_active(mddev_t
*mddev
)
5223 raid5_conf_t
*conf
= mddev
->private;
5224 struct disk_info
*tmp
;
5226 for (i
= 0; i
< conf
->raid_disks
; i
++) {
5227 tmp
= conf
->disks
+ i
;
5229 && !test_bit(Faulty
, &tmp
->rdev
->flags
)
5230 && !test_and_set_bit(In_sync
, &tmp
->rdev
->flags
)) {
5231 unsigned long flags
;
5232 spin_lock_irqsave(&conf
->device_lock
, flags
);
5234 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
5237 print_raid5_conf(conf
);
5241 static int raid5_remove_disk(mddev_t
*mddev
, int number
)
5243 raid5_conf_t
*conf
= mddev
->private;
5246 struct disk_info
*p
= conf
->disks
+ number
;
5248 print_raid5_conf(conf
);
5251 if (number
>= conf
->raid_disks
&&
5252 conf
->reshape_progress
== MaxSector
)
5253 clear_bit(In_sync
, &rdev
->flags
);
5255 if (test_bit(In_sync
, &rdev
->flags
) ||
5256 atomic_read(&rdev
->nr_pending
)) {
5260 /* Only remove non-faulty devices if recovery
5263 if (!test_bit(Faulty
, &rdev
->flags
) &&
5264 mddev
->degraded
<= conf
->max_degraded
&&
5265 number
< conf
->raid_disks
) {
5271 if (atomic_read(&rdev
->nr_pending
)) {
5272 /* lost the race, try later */
5279 print_raid5_conf(conf
);
5283 static int raid5_add_disk(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
5285 raid5_conf_t
*conf
= mddev
->private;
5288 struct disk_info
*p
;
5290 int last
= conf
->raid_disks
- 1;
5292 if (mddev
->degraded
> conf
->max_degraded
)
5293 /* no point adding a device */
5296 if (rdev
->raid_disk
>= 0)
5297 first
= last
= rdev
->raid_disk
;
5300 * find the disk ... but prefer rdev->saved_raid_disk
5303 if (rdev
->saved_raid_disk
>= 0 &&
5304 rdev
->saved_raid_disk
>= first
&&
5305 conf
->disks
[rdev
->saved_raid_disk
].rdev
== NULL
)
5306 disk
= rdev
->saved_raid_disk
;
5309 for ( ; disk
<= last
; disk
++)
5310 if ((p
=conf
->disks
+ disk
)->rdev
== NULL
) {
5311 clear_bit(In_sync
, &rdev
->flags
);
5312 rdev
->raid_disk
= disk
;
5314 if (rdev
->saved_raid_disk
!= disk
)
5316 rcu_assign_pointer(p
->rdev
, rdev
);
5319 print_raid5_conf(conf
);
5323 static int raid5_resize(mddev_t
*mddev
, sector_t sectors
)
5325 /* no resync is happening, and there is enough space
5326 * on all devices, so we can resize.
5327 * We need to make sure resync covers any new space.
5328 * If the array is shrinking we should possibly wait until
5329 * any io in the removed space completes, but it hardly seems
5332 sectors
&= ~((sector_t
)mddev
->chunk_sectors
- 1);
5333 md_set_array_sectors(mddev
, raid5_size(mddev
, sectors
,
5334 mddev
->raid_disks
));
5335 if (mddev
->array_sectors
>
5336 raid5_size(mddev
, sectors
, mddev
->raid_disks
))
5338 set_capacity(mddev
->gendisk
, mddev
->array_sectors
);
5340 revalidate_disk(mddev
->gendisk
);
5341 if (sectors
> mddev
->dev_sectors
&& mddev
->recovery_cp
== MaxSector
) {
5342 mddev
->recovery_cp
= mddev
->dev_sectors
;
5343 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
5345 mddev
->dev_sectors
= sectors
;
5346 mddev
->resync_max_sectors
= sectors
;
static int check_stripe_cache(mddev_t *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	raid5_conf_t *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE) * 4);
		return 0;
	}
	return 1;
}
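/* Worked example (assumed values): with 1MiB chunks (2048 sectors) and
 * a 4KiB STRIPE_SIZE, (2048 << 9) / 4096 * 4 = 1024 stripe_heads are
 * wanted, so a reshape is refused until stripe_cache_size is raised
 * well above the default of 256.
 */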
5373 static int check_reshape(mddev_t
*mddev
)
5375 raid5_conf_t
*conf
= mddev
->private;
5377 if (mddev
->delta_disks
== 0 &&
5378 mddev
->new_layout
== mddev
->layout
&&
5379 mddev
->new_chunk_sectors
== mddev
->chunk_sectors
)
5380 return 0; /* nothing to do */
5382 /* Cannot grow a bitmap yet */
5384 if (mddev
->degraded
> conf
->max_degraded
)
5386 if (mddev
->delta_disks
< 0) {
5387 /* We might be able to shrink, but the devices must
5388 * be made bigger first.
5389 * For raid6, 4 is the minimum size.
5390 * Otherwise 2 is the minimum
5393 if (mddev
->level
== 6)
5395 if (mddev
->raid_disks
+ mddev
->delta_disks
< min
)
5399 if (!check_stripe_cache(mddev
))
5402 return resize_stripes(conf
, conf
->raid_disks
+ mddev
->delta_disks
);
5405 static int raid5_start_reshape(mddev_t
*mddev
)
5407 raid5_conf_t
*conf
= mddev
->private;
5410 int added_devices
= 0;
5411 unsigned long flags
;
5413 if (test_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
))
5416 if (!check_stripe_cache(mddev
))
5419 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
5420 if (rdev
->raid_disk
< 0 &&
5421 !test_bit(Faulty
, &rdev
->flags
))
5424 if (spares
- mddev
->degraded
< mddev
->delta_disks
- conf
->max_degraded
)
5425 /* Not enough devices even to make a degraded array
5430 /* Refuse to reduce size of the array. Any reductions in
5431 * array size must be through explicit setting of array_size
5434 if (raid5_size(mddev
, 0, conf
->raid_disks
+ mddev
->delta_disks
)
5435 < mddev
->array_sectors
) {
5436 printk(KERN_ERR
"md: %s: array size must be reduced "
5437 "before number of disks\n", mdname(mddev
));
5441 atomic_set(&conf
->reshape_stripes
, 0);
5442 spin_lock_irq(&conf
->device_lock
);
5443 conf
->previous_raid_disks
= conf
->raid_disks
;
5444 conf
->raid_disks
+= mddev
->delta_disks
;
5445 conf
->prev_chunk_sectors
= conf
->chunk_sectors
;
5446 conf
->chunk_sectors
= mddev
->new_chunk_sectors
;
5447 conf
->prev_algo
= conf
->algorithm
;
5448 conf
->algorithm
= mddev
->new_layout
;
5449 if (mddev
->delta_disks
< 0)
5450 conf
->reshape_progress
= raid5_size(mddev
, 0, 0);
5452 conf
->reshape_progress
= 0;
5453 conf
->reshape_safe
= conf
->reshape_progress
;
5455 spin_unlock_irq(&conf
->device_lock
);
5457 /* Add some new drives, as many as will fit.
5458 * We know there are enough to make the newly sized array work.
5460 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
5461 if (rdev
->raid_disk
< 0 &&
5462 !test_bit(Faulty
, &rdev
->flags
)) {
5463 if (raid5_add_disk(mddev
, rdev
) == 0) {
5465 if (rdev
->raid_disk
>= conf
->previous_raid_disks
) {
5466 set_bit(In_sync
, &rdev
->flags
);
5469 rdev
->recovery_offset
= 0;
5470 sprintf(nm
, "rd%d", rdev
->raid_disk
);
5471 if (sysfs_create_link(&mddev
->kobj
,
5474 "raid5: failed to create "
5475 " link %s for %s\n",
5481 /* When a reshape changes the number of devices, ->degraded
5482 * is measured against the large of the pre and post number of
5484 if (mddev
->delta_disks
> 0) {
5485 spin_lock_irqsave(&conf
->device_lock
, flags
);
5486 mddev
->degraded
+= (conf
->raid_disks
- conf
->previous_raid_disks
)
5488 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
5490 mddev
->raid_disks
= conf
->raid_disks
;
5491 mddev
->reshape_position
= conf
->reshape_progress
;
5492 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
5494 clear_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
);
5495 clear_bit(MD_RECOVERY_CHECK
, &mddev
->recovery
);
5496 set_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
);
5497 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
5498 mddev
->sync_thread
= md_register_thread(md_do_sync
, mddev
,
5500 if (!mddev
->sync_thread
) {
5501 mddev
->recovery
= 0;
5502 spin_lock_irq(&conf
->device_lock
);
5503 mddev
->raid_disks
= conf
->raid_disks
= conf
->previous_raid_disks
;
5504 conf
->reshape_progress
= MaxSector
;
5505 spin_unlock_irq(&conf
->device_lock
);
5508 conf
->reshape_checkpoint
= jiffies
;
5509 md_wakeup_thread(mddev
->sync_thread
);
5510 md_new_event(mddev
);
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(raid5_conf_t *conf)
{
	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (datadisks) * chunksize, where 'datadisks' is the
		 * number of devices that hold data rather than parity.
		 */
		{
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
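/* Worked example (hypothetical geometry): after growing to 6 devices
 * with 512KiB chunks on RAID5, data_disks = 5 and stripe =
 * 5 * (524288 / 4096) = 640 pages, so read-ahead is bumped to at least
 * 1280 pages (5MiB) to cover two full stripes.
 */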
5541 /* This is called from the raid5d thread with mddev_lock held.
5542 * It makes config changes to the device.
5544 static void raid5_finish_reshape(mddev_t
*mddev
)
5546 raid5_conf_t
*conf
= mddev
->private;
5548 if (!test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
)) {
5550 if (mddev
->delta_disks
> 0) {
5551 md_set_array_sectors(mddev
, raid5_size(mddev
, 0, 0));
5552 set_capacity(mddev
->gendisk
, mddev
->array_sectors
);
5554 revalidate_disk(mddev
->gendisk
);
5557 mddev
->degraded
= conf
->raid_disks
;
5558 for (d
= 0; d
< conf
->raid_disks
; d
++)
5559 if (conf
->disks
[d
].rdev
&&
5561 &conf
->disks
[d
].rdev
->flags
))
5563 for (d
= conf
->raid_disks
;
5564 d
< conf
->raid_disks
- mddev
->delta_disks
;
5566 mdk_rdev_t
*rdev
= conf
->disks
[d
].rdev
;
5567 if (rdev
&& raid5_remove_disk(mddev
, d
) == 0) {
5569 sprintf(nm
, "rd%d", rdev
->raid_disk
);
5570 sysfs_remove_link(&mddev
->kobj
, nm
);
5571 rdev
->raid_disk
= -1;
5575 mddev
->layout
= conf
->algorithm
;
5576 mddev
->chunk_sectors
= conf
->chunk_sectors
;
5577 mddev
->reshape_position
= MaxSector
;
5578 mddev
->delta_disks
= 0;
static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev->private;

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
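/* Summary of the quiesce values as implied by the code (not a formal
 * interface description): conf->quiesce == 0 means normal operation,
 * 1 means writes are fully stopped, and 2 is the transient value that
 * asks resync/reshape (see sync_request's wait on conf->quiesce != 2)
 * to pause while the active stripes drain.
 */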
static void *raid5_takeover_raid1(mddev_t *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
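/* Illustrative example (made-up size): for a 2-drive RAID1 of 1000200
 * sectors, 128 does not divide the array size evenly, so the loop above
 * halves chunksect until it reaches 8 (1000200 = 8 * 125025).  8 sectors
 * is 4KiB, which still passes the STRIPE_SIZE check on a 4KiB-page
 * machine, so the takeover proceeds with a 4KiB chunk.
 */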
5645 static void *raid5_takeover_raid6(mddev_t
*mddev
)
5649 switch (mddev
->layout
) {
5650 case ALGORITHM_LEFT_ASYMMETRIC_6
:
5651 new_layout
= ALGORITHM_LEFT_ASYMMETRIC
;
5653 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
5654 new_layout
= ALGORITHM_RIGHT_ASYMMETRIC
;
5656 case ALGORITHM_LEFT_SYMMETRIC_6
:
5657 new_layout
= ALGORITHM_LEFT_SYMMETRIC
;
5659 case ALGORITHM_RIGHT_SYMMETRIC_6
:
5660 new_layout
= ALGORITHM_RIGHT_SYMMETRIC
;
5662 case ALGORITHM_PARITY_0_6
:
5663 new_layout
= ALGORITHM_PARITY_0
;
5665 case ALGORITHM_PARITY_N
:
5666 new_layout
= ALGORITHM_PARITY_N
;
5669 return ERR_PTR(-EINVAL
);
5671 mddev
->new_level
= 5;
5672 mddev
->new_layout
= new_layout
;
5673 mddev
->delta_disks
= -1;
5674 mddev
->raid_disks
-= 1;
5675 return setup_conf(mddev
);
5679 static int raid5_check_reshape(mddev_t
*mddev
)
5681 /* For a 2-drive array, the layout and chunk size can be changed
5682 * immediately as not restriping is needed.
5683 * For larger arrays we record the new value - after validation
5684 * to be used by a reshape pass.
5686 raid5_conf_t
*conf
= mddev
->private;
5687 int new_chunk
= mddev
->new_chunk_sectors
;
5689 if (mddev
->new_layout
>= 0 && !algorithm_valid_raid5(mddev
->new_layout
))
5691 if (new_chunk
> 0) {
5692 if (!is_power_of_2(new_chunk
))
5694 if (new_chunk
< (PAGE_SIZE
>>9))
5696 if (mddev
->array_sectors
& (new_chunk
-1))
5697 /* not factor of array size */
5701 /* They look valid */
5703 if (mddev
->raid_disks
== 2) {
5704 /* can make the change immediately */
5705 if (mddev
->new_layout
>= 0) {
5706 conf
->algorithm
= mddev
->new_layout
;
5707 mddev
->layout
= mddev
->new_layout
;
5709 if (new_chunk
> 0) {
5710 conf
->chunk_sectors
= new_chunk
;
5711 mddev
->chunk_sectors
= new_chunk
;
5713 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
5714 md_wakeup_thread(mddev
->thread
);
5716 return check_reshape(mddev
);
5719 static int raid6_check_reshape(mddev_t
*mddev
)
5721 int new_chunk
= mddev
->new_chunk_sectors
;
5723 if (mddev
->new_layout
>= 0 && !algorithm_valid_raid6(mddev
->new_layout
))
5725 if (new_chunk
> 0) {
5726 if (!is_power_of_2(new_chunk
))
5728 if (new_chunk
< (PAGE_SIZE
>> 9))
5730 if (mddev
->array_sectors
& (new_chunk
-1))
5731 /* not factor of array size */
5735 /* They look valid */
5736 return check_reshape(mddev
);
static void *raid5_takeover(mddev_t *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if all devices are the same - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */

	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}
5762 static struct mdk_personality raid5_personality
;
5764 static void *raid6_takeover(mddev_t
*mddev
)
5766 /* Currently can only take over a raid5. We map the
5767 * personality to an equivalent raid6 personality
5768 * with the Q block at the end.
5772 if (mddev
->pers
!= &raid5_personality
)
5773 return ERR_PTR(-EINVAL
);
5774 if (mddev
->degraded
> 1)
5775 return ERR_PTR(-EINVAL
);
5776 if (mddev
->raid_disks
> 253)
5777 return ERR_PTR(-EINVAL
);
5778 if (mddev
->raid_disks
< 3)
5779 return ERR_PTR(-EINVAL
);
5781 switch (mddev
->layout
) {
5782 case ALGORITHM_LEFT_ASYMMETRIC
:
5783 new_layout
= ALGORITHM_LEFT_ASYMMETRIC_6
;
5785 case ALGORITHM_RIGHT_ASYMMETRIC
:
5786 new_layout
= ALGORITHM_RIGHT_ASYMMETRIC_6
;
5788 case ALGORITHM_LEFT_SYMMETRIC
:
5789 new_layout
= ALGORITHM_LEFT_SYMMETRIC_6
;
5791 case ALGORITHM_RIGHT_SYMMETRIC
:
5792 new_layout
= ALGORITHM_RIGHT_SYMMETRIC_6
;
5794 case ALGORITHM_PARITY_0
:
5795 new_layout
= ALGORITHM_PARITY_0_6
;
5797 case ALGORITHM_PARITY_N
:
5798 new_layout
= ALGORITHM_PARITY_N
;
5801 return ERR_PTR(-EINVAL
);
5803 mddev
->new_level
= 6;
5804 mddev
->new_layout
= new_layout
;
5805 mddev
->delta_disks
= 1;
5806 mddev
->raid_disks
+= 1;
5807 return setup_conf(mddev
);
5811 static struct mdk_personality raid6_personality
=
5815 .owner
= THIS_MODULE
,
5816 .make_request
= make_request
,
5820 .error_handler
= error
,
5821 .hot_add_disk
= raid5_add_disk
,
5822 .hot_remove_disk
= raid5_remove_disk
,
5823 .spare_active
= raid5_spare_active
,
5824 .sync_request
= sync_request
,
5825 .resize
= raid5_resize
,
5827 .check_reshape
= raid6_check_reshape
,
5828 .start_reshape
= raid5_start_reshape
,
5829 .finish_reshape
= raid5_finish_reshape
,
5830 .quiesce
= raid5_quiesce
,
5831 .takeover
= raid6_takeover
,
5833 static struct mdk_personality raid5_personality
=
5837 .owner
= THIS_MODULE
,
5838 .make_request
= make_request
,
5842 .error_handler
= error
,
5843 .hot_add_disk
= raid5_add_disk
,
5844 .hot_remove_disk
= raid5_remove_disk
,
5845 .spare_active
= raid5_spare_active
,
5846 .sync_request
= sync_request
,
5847 .resize
= raid5_resize
,
5849 .check_reshape
= raid5_check_reshape
,
5850 .start_reshape
= raid5_start_reshape
,
5851 .finish_reshape
= raid5_finish_reshape
,
5852 .quiesce
= raid5_quiesce
,
5853 .takeover
= raid5_takeover
,
5856 static struct mdk_personality raid4_personality
=
5860 .owner
= THIS_MODULE
,
5861 .make_request
= make_request
,
5865 .error_handler
= error
,
5866 .hot_add_disk
= raid5_add_disk
,
5867 .hot_remove_disk
= raid5_remove_disk
,
5868 .spare_active
= raid5_spare_active
,
5869 .sync_request
= sync_request
,
5870 .resize
= raid5_resize
,
5872 .check_reshape
= raid5_check_reshape
,
5873 .start_reshape
= raid5_start_reshape
,
5874 .finish_reshape
= raid5_finish_reshape
,
5875 .quiesce
= raid5_quiesce
,
static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");