/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
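/*
 * Illustrative example (not from the original source): suppose bm_write == 5
 * and bm_flush == 5.  A new write arrives, so its stripe records
 * sh->bm_seq = 6 (bm_flush + 1) while the updated bitmap bits are still only
 * in memory.  On unplug, bm_flush becomes 6; the daemon then notices
 * bm_flush > bm_write, writes out the pending bitmap updates, and advances
 * bm_write to 6, after which the queued stripe may safely be written.
 */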
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
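/*
 * Note added for clarity (not in the original source): the stripe hash table
 * is one page of hlist_heads, so NR_HASH is a power of two and HASH_MASK can
 * reduce the stripe number (sect >> STRIPE_SHIFT) to a bucket index.  For
 * example, with 4K pages and 8-byte list heads, NR_HASH is 512 and a stripe
 * starting at sector 0x1234000 hashes to bucket ((0x1234000 >> 3) & 511).
 */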
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be on the list.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	/* this must be a bitwise OR; a logical '||' would collapse the
	 * upper 16-bit counter to 0 or 1 */
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
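/*
 * Illustrative note (not in the original source): with this packing, a
 * bi_phys_segments value of 0x0002000a means 10 outstanding stripe
 * references in the low half and 2 "hw segment" counts in the high half;
 * the dec/set helpers above only ever touch their own 16 bits.
 */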
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid6, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
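/*
 * Worked example added for illustration (not in the original source): for a
 * 5-disk md-layout RAID6 stripe with pd_idx == 3 and qd_idx == 4,
 * syndrome_disks is 3; walking from raid6_d0 the data disks 0, 1 and 2 map
 * to slots 0, 1 and 2, while the P and Q disks map to slots 3 and 4.
 */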
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	while (bi) {
		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}
static void print_raid5_conf(raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes) == 0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i = 0; i < num; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);

static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				       && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}
static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
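/*
 * Layout note added for clarity (not in the original source): the per-cpu
 * scribble buffer is sized by scribble_len() later in this file as
 *   (disks + 2) * sizeof(struct page *)   -- source/destination pointers
 * + (disks + 2) * sizeof(addr_conv_t)     -- address conversion space
 * so the expression above simply skips past the page-pointer array.
 */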
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = (void *)raid6_empty_zero_page;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	return count;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks; i++)
		blocks[i] = (void *)raid6_empty_zero_page;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute,
				  sh, to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE,
						       faila, blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE,
						       faila, failb, blocks, &submit);
		}
	}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}
static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
						     dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
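/*
 * Sizing example added for clarity (not in the original source): for a
 * 6-device array, scribble_len(6) is 8 * sizeof(struct page *) +
 * 8 * sizeof(addr_conv_t), i.e. one pointer and one address-conversion slot
 * per device plus the two extra P/Q slots, which is exactly what
 * to_addr_conv() relies on when it skips past the pointer array.
 */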
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying the shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}
static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "raid5:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded)
			printk_rl(KERN_WARNING
				  "raid5:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			printk_rl(KERN_WARNING
				  "raid5:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	pr_debug("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "raid5: Disk failure on %s, disabling device.\n"
		       "raid5: Operation continuing on %d devices.\n",
		       bdevname(rdev->bdev, b), conf->raid_disks - mddev->degraded);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
					 : (conf->chunk_size >> 9);
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but order
			 * of blocks for computing Q is different.
			 */
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
					 : (conf->chunk_size >> 9);
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else if (i > sh->pd_idx)
				i -= 2; /* D D Q P D */
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);

	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
*conf
);
2097 static void stripe_set_idx(sector_t stripe
, raid5_conf_t
*conf
, int previous
,
2098 struct stripe_head
*sh
)
2100 int sectors_per_chunk
=
2101 previous
? (conf
->prev_chunk
>> 9)
2102 : (conf
->chunk_size
>> 9);
2104 int chunk_offset
= sector_div(stripe
, sectors_per_chunk
);
2105 int disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
2107 raid5_compute_sector(conf
,
2108 stripe
* (disks
- conf
->max_degraded
)
2109 *sectors_per_chunk
+ chunk_offset
,
static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
		     struct stripe_head_state *s, int disks,
		     struct bio **return_bi)
{
	int i;

	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
	set_bit(STRIPE_HANDLE, &sh->state);
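/*
 * Editorial note (not part of the driver): the decision in fetch_block5()
 * boils down to "compute if this is the one missing block, otherwise read
 * it from an in-sync disk".  A minimal sketch of that rule, assuming the
 * caller tracks how many blocks are already up to date (all names here are
 * illustrative only, not real driver symbols):
 */
#if 0	/* illustrative sketch, never compiled */
static int want_compute_example(int uptodate, int disks, int failed,
				int disk_idx, int failed_num)
{
	/* With disks - 1 blocks up to date, the last one can be produced
	 * by XOR instead of a read, which matters when the backing disk
	 * for disk_idx has failed (disk_idx == failed_num). */
	return uptodate == disks - 1 && failed && disk_idx == failed_num;
}
#endif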
/* fetch_block6 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill6 to continue
static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
			 struct r6_state *r6s, int disk_idx, int disks)
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
				  &sh->dev[r6s->failed_num[1]] };

	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	      (fdev[0]->toread || s->to_write)) ||
	      (fdev[1]->toread || s->to_write)))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == r6s->failed_num[0] ||
				   disk_idx == r6s->failed_num[1]))) {
			/* have disk failed, and we're requested to fetch it;
			pr_debug("Computing stripe %llu block %d\n",
				 (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			for (other = disks; other--; ) {
				if (other == disk_idx)
				if (!test_bit(R5_UPTODATE,
					      &sh->dev[other].flags))
			pr_debug("Computing stripe %llu blocks %d,%d\n",
				 (unsigned long long)sh->sector,
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			pr_debug("Reading block %d (sync=%d)\n",
				 disk_idx, s->syncing);
 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
static void handle_stripe_fill6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block6(sh, s, r6s, i, disks))
	set_bit(STRIPE_HANDLE, &sh->state);
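/*
 * Editorial note (not from the original source): for RAID-6 the single
 * missing block case mirrors the RAID-5 path (ops.target set, target2 = -1),
 * while the two-failure case in fetch_block6() schedules one compute
 * operation that rebuilds both blocks at once (target and target2).  The
 * "*very* expensive" remark above refers to the Galois-field math needed to
 * solve for two unknowns, which is why it is only attempted once
 * failed >= 2 and uptodate == disks - 2.
 */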
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
	for (i = disks; i--; )
		if (sh->dev[i].written) {
			if (!test_bit(R5_LOCKED, &dev->flags) &&
			    test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
				       dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
				if (dev->towrite == NULL)
				spin_unlock_irq(&conf->device_lock);
					bitmap_endwrite(conf->mddev->bitmap,
						!test_bit(STRIPE_DEGRADED, &sh->state),

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s, int disks)
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw += 2*disks; /* cannot read it */
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		 (unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				    test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						 "%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				    test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						 "%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
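/*
 * Editorial note (not part of the driver): how the rmw/rcw comparison above
 * typically plays out.  On a hypothetical 6-disk RAID-5 stripe (5 data +
 * parity), writing one data block needs the old data block and the old
 * parity read in for read-modify-write (rmw = 2), versus the 4 untouched
 * data blocks for reconstruct-write (rcw = 4), so rmw wins; fully
 * overwriting 4 of the 5 data blocks flips the preference (rmw = 5,
 * rcw = 1).  A block that is not R5_Insync is priced at 2*disks so that
 * alternative is effectively never chosen.  A simplified model of the
 * comparison, ignoring blocks that are already cached (illustrative names
 * only):
 */
#if 0	/* illustrative sketch, never compiled */
static int prefer_rmw_example(int blocks_to_write, int data_disks)
{
	int rmw = blocks_to_write + 1;		/* old data blocks + old parity */
	int rcw = data_disks - blocks_to_write;	/* data blocks not being written */
	return rmw > 0 && rmw < rcw;
}
#endif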
static void handle_stripe_dirtying6(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
	int rcw = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = sh->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* check if we haven't enough data */
		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
		    i != pd_idx && i != qd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (!test_bit(R5_Insync, &dev->flags))
				continue; /* it's a failed drive */
			    test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				pr_debug("Read_old stripe %llu "
					 "block %d for Reconstruct\n",
					 (unsigned long long)sh->sector, i);
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				pr_debug("Request delayed stripe %llu "
					 "block %d for Reconstruct\n",
					 (unsigned long long)sh->sector, i);
				set_bit(STRIPE_DELAYED, &sh->state);
				set_bit(STRIPE_HANDLE, &sh->state);
	/* now if nothing is locked, and if we have enough data, we can start a
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		schedule_reconstruction(sh, s, 1, 0);
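/*
 * Editorial note (not from the original source): unlike the RAID-5 path,
 * this RAID-6 version never considers read-modify-write; updating P and Q
 * in place would require the old data, old P and old Q, so the driver always
 * reconstruct-writes, reading in whichever data blocks are not being fully
 * overwritten (counted in rcw above) before schedule_reconstruction() is
 * called with reconstruct-write semantics.
 */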
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
		dev = &sh->dev[s->failed_num];
	case check_state_compute_result:
		sh->check_state = check_state_idle;
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			set_bit(STRIPE_INSYNC, &sh->state);
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
	case check_state_compute_run:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
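/*
 * Editorial note (not part of the driver): the check_state_check_result
 * branch above is where the "check" and "repair" sync actions diverge.
 * Both add STRIPE_SECTORS to resync_mismatches when the XOR over the stripe
 * is non-zero, but only a repair pass (MD_RECOVERY_CHECK clear) goes on to
 * recompute the parity block through the compute path; a plain check marks
 * the stripe in-sync and leaves the mismatch on disk for the admin to see
 * in mismatch_cnt.
 */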
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  struct r6_state *r6s, int disks)
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == r6s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			sh->check_state = check_state_run;
		if (!r6s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
				sh->check_state = check_state_run_q;

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			set_bit(STRIPE_OP_CHECK, &s->ops_request);

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[r6s->failed_num[1]];
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		if (s->failed >= 1) {
			dev = &sh->dev[r6s->failed_num[0]];
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
				set_bit(STRIPE_INSYNC, &sh->state);
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					target = &sh->ops.target2;
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
	case check_state_compute_run:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
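/*
 * Editorial note (not from the original source): in the RAID-6 repair path
 * above, sh->ops.zero_sum_result carries two independent bits,
 * SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT, so a mismatch can implicate P,
 * Q or both.  The compute request appears to be assembled accordingly: the
 * first bad parity block becomes ops.target and, when both are bad, the
 * second goes into ops.target2, mirroring the two-target compute used when
 * two data blocks are missing.
 */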
static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
				    struct r6_state *r6s)
	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			    test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    (!r6s || j != sh2->qd_idx) &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			release_stripe(sh2);

	/* done submitting copies, wait for them to complete */
		dma_wait_for_async_tx(tx);
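/*
 * Editorial note (not part of the driver): the copy loop above re-maps each
 * data block of the source stripe into the post-reshape geometry:
 * compute_blocknr() turns (stripe, slot) back into an array-wide block
 * number, raid5_compute_sector() maps that number using the new disk count
 * and layout, and async_memcpy() moves the page into the destination stripe
 * obtained from get_active_stripe().  Only once every data slot of a
 * destination stripe is flagged R5_Expanded is it marked
 * STRIPE_EXPAND_READY and queued for write-out.
 */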
 * handle_stripe - do things to a stripe.
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * return some read requests which now have data
 * return some write requests which are safely on disc
 * schedule a read on some buffers
 * schedule a write of some buffers
 * return confirmation of parity correctness
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
static bool handle_stripe5(struct stripe_head *sh)
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct bio *return_bi = NULL;
	struct stripe_head_state s;
	mdk_rdev_t *blocked_rdev = NULL;

	memset(&s, 0, sizeof(s));
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
		 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
		 sh->reconstruct_state);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);

	/* Now to look around and see what can be done */
	for (i=disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
			"written %p\n", i, dev->flags, dev->toread, dev->read,
			dev->towrite, dev->written);

		/* maybe we can request a biofill operation
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;

		if (test_bit(R5_Wantfill, &dev->flags))
		else if (dev->toread)
			if (!test_bit(R5_OVERWRITE, &dev->flags))
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			set_bit(R5_Insync, &dev->flags);

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);

	pr_debug("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		s.locked, s.uptodate, s.to_read, s.to_write,
		s.failed, s.failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	if (s.failed > 1 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 1 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	dev = &sh->dev[sh->pd_idx];
	    ((test_bit(R5_Insync, &dev->flags) &&
	      !test_bit(R5_LOCKED, &dev->flags) &&
	      test_bit(R5_UPTODATE, &dev->flags)) ||
	       (s.failed == 1 && s.failed_num == sh->pd_idx)))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	if (s.to_read || s.non_overwrite ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill5(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		for (i = disks; i--; ) {
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (!test_bit(R5_Insync, &dev->flags) ||
				    (i == sh->pd_idx && s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			atomic_dec(&conf->preread_active_stripes);
			if (atomic_read(&conf->preread_active_stripes) <
				md_wakeup_thread(conf->mddev->thread);

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying5(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks5(conf, sh, &s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	if (s.failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
		dev = &sh->dev[s.failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			release_stripe(sh2);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, NULL);

	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

		raid_run_ops(sh, s.ops_request);

	return_io(return_bi);

	return blocked_rdev == NULL;
static bool handle_stripe6(struct stripe_head *sh)
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks;
	struct bio *return_bi = NULL;
	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
	struct stripe_head_state s;
	struct r6_state r6s;
	struct r5dev *dev, *pdev, *qdev;
	mdk_rdev_t *blocked_rdev = NULL;

	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
		 (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), pd_idx, qd_idx,
		 sh->check_state, sh->reconstruct_state);
	memset(&s, 0, sizeof(s));
	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	/* Now to look around and see what can be done */

	for (i=disks; i--; ) {
		clear_bit(R5_Insync, &dev->flags);

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags))
			BUG_ON(++s.compute > 2);

		if (test_bit(R5_Wantfill, &dev->flags)) {
		} else if (dev->toread)
			if (!test_bit(R5_OVERWRITE, &dev->flags))
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
				r6s.failed_num[s.failed] = i;
			set_bit(R5_Insync, &dev->flags);

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);

	pr_debug("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d,%d\n",
		s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
		r6s.failed_num[0], r6s.failed_num[1]);
	/* check if the array has lost >2 devices and, if so, some requests
	 * might need to be failed
	if (s.failed > 2 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 2 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);

	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	pdev = &sh->dev[pd_idx];
	r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
	qdev = &sh->dev[qd_idx];
	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
	    ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
	    ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && test_bit(R5_UPTODATE, &qdev->flags)))))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill6(sh, &s, &r6s, disks);

	/* Now we check to see if any write operations have recently
	if (sh->reconstruct_state == reconstruct_state_drain_result) {
		int qd_idx = sh->qd_idx;

		sh->reconstruct_state = reconstruct_state_idle;
		/* All the 'written' buffers and the parity blocks are ready to
		 * be written back to disk
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
		for (i = disks; i--; ) {
			if (test_bit(R5_LOCKED, &dev->flags) &&
			    (i == sh->pd_idx || i == qd_idx ||
				pr_debug("Writing block %d\n", i);
				BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
				set_bit(R5_Wantwrite, &dev->flags);
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == qd_idx) &&
					set_bit(STRIPE_INSYNC, &sh->state);
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			atomic_dec(&conf->preread_active_stripes);
			if (atomic_read(&conf->preread_active_stripes) <
				md_wakeup_thread(conf->mddev->thread);

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks6(conf, sh, &s, &r6s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	if (s.failed <= 2 && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			dev = &sh->dev[r6s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			release_stripe(sh2);

		/* Need to write out all blocks after computing P&Q */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, &r6s);

	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

		raid_run_ops(sh, s.ops_request);

	return_io(return_bi);

	return blocked_rdev == NULL;
/* returns true if the stripe was handled */
static bool handle_stripe(struct stripe_head *sh)
	if (sh->raid_conf->level == 6)
		return handle_stripe6(sh);
	return handle_stripe5(sh);
static void raid5_activate_delayed(raid5_conf_t *conf)
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		blk_plug_device(conf->mddev->queue);
static void activate_bit_delay(raid5_conf_t *conf)
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
static void unplug_slaves(mddev_t *mddev)
	raid5_conf_t *conf = mddev_to_conf(mddev);
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			blk_unplug(r_queue);
			rdev_dec_pending(rdev, mddev);
static void raid5_unplug_device(struct request_queue *q)
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		raid5_activate_delayed(conf);
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
static int raid5_congested(void *data, int bits)
	mddev_t *mddev = data;
	raid5_conf_t *conf = mddev_to_conf(mddev);

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	if (conf->inactive_blocked)
	if (list_empty_careful(&conf->inactive_list))
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk < mddev->chunk_size)
		chunk_sectors = mddev->new_chunk >> 9;
	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
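/*
 * Editorial note (not from the original source): a numeric example of the
 * merge limit above, with hypothetical values.  For a 64KiB chunk
 * (chunk_sectors = 128), a read whose front sector sits at offset 120 within
 * its chunk and which already spans 4 sectors leaves
 * (128 - (120 + 4)) << 9 = 2KiB of room, so only biovecs up to 2KiB may be
 * merged; if the result would go negative it is clamped to 0 and the bio is
 * ended at the chunk boundary.
 */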
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk < mddev->chunk_size)
		chunk_sectors = mddev->new_chunk >> 9;
	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
	bi = conf->retry_read_aligned;
		conf->retry_read_aligned = NULL;
	bi = conf->retry_read_aligned_list;
		conf->retry_read_aligned_list = bi->bi_next;
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 16 bits)
		bi->bi_phys_segments = 1; /* biased count of active stripes */
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  If the read failed..
static void raid5_align_endio(struct bio *bi, int error)
	struct bio* raid_bi  = bi->bi_private;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
	conf = mddev_to_conf(mddev);
	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
static int bio_fits_rdev(struct bio *bi)
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > q->max_sectors)
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > q->max_phys_segments)

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned int dd_idx;
	struct bio* align_bi;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
	 * use bio_clone to make a copy of the bio
	align_bi = bio_clone(raid_bio, GFP_NOIO);
	 *   set bi_end_io to a new function, and set bi_private to the
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev =  rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

		if (!bio_fits_rdev(align_bi)) {
			/* too big in some way */
			rdev_dec_pending(rdev, mddev);

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded.  In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o.  The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
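/*
 * Editorial note (not part of the driver): the bookkeeping above implements
 * the BYPASS_THRESHOLD policy.  Each time a handle_list stripe without in
 * flight i/o is processed while the same full-stripe write is still waiting
 * at the head of the hold_list, bypass_count is incremented; once it exceeds
 * bypass_threshold (or there are no pending full writes at all), the head of
 * the hold_list is taken instead and bypass_count is charged back down by
 * the threshold, so full-stripe writes cannot be starved indefinitely by a
 * stream of small requests.
 */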
static int make_request(struct request_queue *q, struct bio * bi)
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);

	if (unlikely(bio_barrier(bi))) {
		bio_endio(bi, -EOPNOTSUPP);

	md_write_start(mddev, bi);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
	    mddev->reshape_position == MaxSector &&
	    chunk_aligned_read(q,bi))

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);

	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		int disks, data_disks;

		disks = conf->raid_disks;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value.
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			spin_lock_irq(&conf->device_lock);
			if (mddev->delta_disks < 0
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				disks = conf->previous_raid_disks;
				if (mddev->delta_disks < 0
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					spin_unlock_irq(&conf->device_lock);
			spin_unlock_irq(&conf->device_lock);

		data_disks = disks - conf->max_degraded;

		new_sector = raid5_compute_sector(conf, logical_sector,
		pr_debug("raid5: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK), 0);
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 *  STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				spin_lock_irq(&conf->device_lock);
				if (mddev->delta_disks < 0
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
				spin_unlock_irq(&conf->device_lock);

			/* FIXME what if we get a false positive because these
			 * are being updated.
			if (logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				raid5_unplug_device(mddev->queue);
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);

	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(bi);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0) {
			md_write_end(mddev);
static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);

static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->delta_disks < 0 &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (mddev->delta_disks > 0 &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);

	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	if (mddev->new_chunk > mddev->chunk_size)
		reshape_sectors = mddev->new_chunk / 512;
		reshape_sectors = mddev->chunk_size / 512;

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);

	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 * If 'readpos' is behind 'writepos', then there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
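	/*
	 * Editorial note (not from the original source): a concrete reading
	 * of the rule above, with hypothetical numbers.  When growing an
	 * array, suppose reshape_progress maps to writepos = 5120, the last
	 * checkpointed reshape_safe maps to safepos = 4096, and
	 * readpos = 5632.  Then safepos < writepos and readpos > writepos, so
	 * the superblock must be updated (and reshape_stripes drained) before
	 * any more destination stripes are written; otherwise a crash could
	 * overwrite source data that the metadata does not yet record as
	 * copied.
	 */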
4052 	if ((mddev->delta_disks < 0
4053 	     ? (safepos > writepos && readpos < writepos)
4054 	     : (safepos < writepos && readpos > writepos)) ||
4055 	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4056 		/* Cannot proceed until we've updated the superblock... */
4057 		wait_event(conf->wait_for_overlap,
4058 			   atomic_read(&conf->reshape_stripes)==0);
4059 		mddev->reshape_position = conf->reshape_progress;
4060 		mddev->curr_resync_completed = mddev->curr_resync;
4061 		conf->reshape_checkpoint = jiffies;
4062 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
4063 		md_wakeup_thread(mddev->thread);
4064 		wait_event(mddev->sb_wait, mddev->flags == 0 ||
4065 			   kthread_should_stop());
4066 		spin_lock_irq(&conf->device_lock);
4067 		conf->reshape_safe = mddev->reshape_position;
4068 		spin_unlock_irq(&conf->device_lock);
4069 		wake_up(&conf->wait_for_overlap);
4070 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4073 	if (mddev->delta_disks < 0) {
4074 		BUG_ON(conf->reshape_progress == 0);
4075 		stripe_addr = writepos;
4076 		BUG_ON((mddev->dev_sectors &
4077 			~((sector_t)reshape_sectors - 1))
4078 		       - reshape_sectors - stripe_addr
4081 		BUG_ON(writepos != sector_nr + reshape_sectors);
4082 		stripe_addr = sector_nr;
4084 	INIT_LIST_HEAD(&stripes);
4085 	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4088 		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4089 		set_bit(STRIPE_EXPANDING, &sh->state);
4090 		atomic_inc(&conf->reshape_stripes);
4091 		/* If any of this stripe is beyond the end of the old
4092 		 * array, then we need to zero those blocks
4094 		for (j=sh->disks; j--;) {
4096 			if (j == sh->pd_idx)
4098 			if (conf->level == 6 &&
4101 			s = compute_blocknr(sh, j, 0);
4102 			if (s < raid5_size(mddev, 0, 0)) {
4106 			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4107 			set_bit(R5_Expanded, &sh->dev[j].flags);
4108 			set_bit(R5_UPTODATE, &sh->dev[j].flags);
4111 		set_bit(STRIPE_EXPAND_READY, &sh->state);
4112 		set_bit(STRIPE_HANDLE, &sh->state);
4114 		list_add(&sh->lru, &stripes);
4116 	spin_lock_irq(&conf->device_lock);
4117 	if (mddev->delta_disks < 0)
4118 		conf->reshape_progress -= reshape_sectors * new_data_disks;
4120 		conf->reshape_progress += reshape_sectors * new_data_disks;
4121 	spin_unlock_irq(&conf->device_lock);
4122 	/* Ok, those stripes are ready. We can start scheduling
4123 	 * reads on the source stripes.
4124 	 * The source stripes are determined by mapping the first and last
4125 	 * block on the destination stripes.
4128 	raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4131 	raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4132 			     *(new_data_disks) - 1),
4134 	if (last_sector >= mddev->dev_sectors)
4135 		last_sector = mddev->dev_sectors - 1;
4136 	while (first_sector <= last_sector) {
4137 		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4138 		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4139 		set_bit(STRIPE_HANDLE, &sh->state);
4141 		first_sector += STRIPE_SECTORS;
4143 	/* Now that the sources are clearly marked, we can release
4144 	 * the destination stripes
4146 	while (!list_empty(&stripes)) {
4147 		sh = list_entry(stripes.next, struct stripe_head, lru);
4148 		list_del_init(&sh->lru);
4151 	/* If this takes us to the resync_max point where we have to pause,
4152 	 * then we need to write out the superblock.
4154 	sector_nr += reshape_sectors;
4155 	if ((sector_nr - mddev->curr_resync_completed) * 2
4156 	    >= mddev->resync_max - mddev->curr_resync_completed) {
4157 		/* Cannot proceed until we've updated the superblock... */
4158 		wait_event(conf->wait_for_overlap,
4159 			   atomic_read(&conf->reshape_stripes) == 0);
4160 		mddev->reshape_position = conf->reshape_progress;
4161 		mddev->curr_resync_completed = mddev->curr_resync;
4162 		conf->reshape_checkpoint = jiffies;
4163 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
4164 		md_wakeup_thread(mddev->thread);
4165 		wait_event(mddev->sb_wait,
4166 			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4167 			   || kthread_should_stop());
4168 		spin_lock_irq(&conf->device_lock);
4169 		conf->reshape_safe = mddev->reshape_position;
4170 		spin_unlock_irq(&conf->device_lock);
4171 		wake_up(&conf->wait_for_overlap);
4172 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4174 	return reshape_sectors;
4177 /* FIXME go_faster isn't used */
4178 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4180 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4181 	struct stripe_head *sh;
4182 	sector_t max_sector = mddev->dev_sectors;
4184 	int still_degraded = 0;
4187 	if (sector_nr >= max_sector) {
4188 		/* just being told to finish up .. nothing much to do */
4189 		unplug_slaves(mddev);
4191 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4196 		if (mddev->curr_resync < max_sector) /* aborted */
4197 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4199 		else /* completed sync */
4201 		bitmap_close_sync(mddev->bitmap);
4206 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4207 		return reshape_request(mddev, sector_nr, skipped);
4209 	/* No need to check resync_max as we never do more than one
4210 	 * stripe, and as resync_max will always be on a chunk boundary,
4211 	 * if the check in md_do_sync didn't fire, there is no chance
4212 	 * of overstepping resync_max here
4215 	/* if there are too many failed drives and we are trying
4216 	 * to resync, then assert that we are finished, because there is
4217 	 * nothing we can do.
4219 	if (mddev->degraded >= conf->max_degraded &&
4220 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4221 		sector_t rv = mddev->dev_sectors - sector_nr;
4225 	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4226 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4227 	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4228 		/* we can skip this block, and probably more */
4229 		sync_blocks /= STRIPE_SECTORS;
4231 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4235 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4237 	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4239 		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4240 		/* make sure we don't swamp the stripe cache if someone else
4241 		 * is trying to get access
4243 		schedule_timeout_uninterruptible(1);
4245 	/* Need to check if array will still be degraded after recovery/resync
4246 	 * We don't need to check the 'failed' flag as when that gets set,
4249 	for (i = 0; i < conf->raid_disks; i++)
4250 		if (conf->disks[i].rdev == NULL)
4253 	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4255 	spin_lock(&sh->lock);
4256 	set_bit(STRIPE_SYNCING, &sh->state);
4257 	clear_bit(STRIPE_INSYNC, &sh->state);
4258 	spin_unlock(&sh->lock);
4260 	/* wait for any blocked device to be handled */
4261 	while (unlikely(!handle_stripe(sh)))
4265 	return STRIPE_SECTORS;
4268 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4270 	/* We may not be able to submit a whole bio at once as there
4271 	 * may not be enough stripe_heads available.
4272 	 * We cannot pre-allocate enough stripe_heads as we may need
4273 	 * more than exist in the cache (if we allow ever larger chunks).
4274 	 * So we do one stripe head at a time and record in
4275 	 * ->bi_hw_segments how many have been done.
4277 	 * We *know* that this entire raid_bio is in one chunk, so
4278 	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
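	 *
	 * Editor's sketch of the resume pattern described above
	 * (illustrative, not part of the original source), with
	 * ->bi_hw_segments acting as a cursor into the bio:
	 *
	 *	for each STRIPE_SECTORS-sized piece, counted by scnt:
	 *		if (scnt < raid5_bi_hw_segments(raid_bio))
	 *			continue;	// finished on an earlier pass
	 *		if (no stripe_head is available) {
	 *			raid5_set_bi_hw_segments(raid_bio, scnt);
	 *			conf->retry_read_aligned = raid_bio;
	 *			return;		// raid5d retries the remainder later
	 *		}
	 *		handle this piece;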
4280 	struct stripe_head *sh;
4282 	sector_t sector, logical_sector, last_sector;
4287 	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4288 	sector = raid5_compute_sector(conf, logical_sector,
4290 	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4292 	for (; logical_sector < last_sector;
4293 	     logical_sector += STRIPE_SECTORS,
4294 	     sector += STRIPE_SECTORS,
4297 		if (scnt < raid5_bi_hw_segments(raid_bio))
4298 			/* already done this stripe */
4301 		sh = get_active_stripe(conf, sector, 0, 1, 0);
4304 			/* failed to get a stripe - must wait */
4305 			raid5_set_bi_hw_segments(raid_bio, scnt);
4306 			conf->retry_read_aligned = raid_bio;
4310 		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4311 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4313 			raid5_set_bi_hw_segments(raid_bio, scnt);
4314 			conf->retry_read_aligned = raid_bio;
4322 	spin_lock_irq(&conf->device_lock);
4323 	remaining = raid5_dec_bi_phys_segments(raid_bio);
4324 	spin_unlock_irq(&conf->device_lock);
4326 		bio_endio(raid_bio, 0);
4327 	if (atomic_dec_and_test(&conf->active_aligned_reads))
4328 		wake_up(&conf->wait_for_stripe);
4332 #ifdef CONFIG_MULTICORE_RAID456
4333 static void __process_stripe(void *param, async_cookie_t cookie)
4335 	struct stripe_head *sh = param;
4341 static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4343 	async_schedule_domain(__process_stripe, sh, domain);
4346 static void synchronize_stripe_processing(struct list_head *domain)
4348 	async_synchronize_full_domain(domain);
4351 static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4358 static void synchronize_stripe_processing(struct list_head *domain)
4365  * This is our raid5 kernel thread.
4367  * We scan the hash table for stripes which can be handled now.
4368  * During the scan, completed stripes are saved for us by the interrupt
4369  * handler, so that they will not have to wait for our next wakeup.
4371 static void raid5d(mddev_t *mddev)
4373 	struct stripe_head *sh;
4374 	raid5_conf_t *conf = mddev_to_conf(mddev);
4376 	LIST_HEAD(raid_domain);
4378 	pr_debug("+++ raid5d active\n");
4380 	md_check_recovery(mddev);
4383 	spin_lock_irq(&conf->device_lock);
4387 		if (conf->seq_flush != conf->seq_write) {
4388 			int seq = conf->seq_flush;
4389 			spin_unlock_irq(&conf->device_lock);
4390 			bitmap_unplug(mddev->bitmap);
4391 			spin_lock_irq(&conf->device_lock);
4392 			conf->seq_write = seq;
4393 			activate_bit_delay(conf);
4396 		while ((bio = remove_bio_from_retry(conf))) {
4398 			spin_unlock_irq(&conf->device_lock);
4399 			ok = retry_aligned_read(conf, bio);
4400 			spin_lock_irq(&conf->device_lock);
4406 		sh = __get_priority_stripe(conf);
4410 		spin_unlock_irq(&conf->device_lock);
4413 		process_stripe(sh, &raid_domain);
4415 		spin_lock_irq(&conf->device_lock);
4417 	pr_debug("%d stripes handled\n", handled);
4419 	spin_unlock_irq(&conf->device_lock);
4421 	synchronize_stripe_processing(&raid_domain);
4422 	async_tx_issue_pending_all();
4423 	unplug_slaves(mddev);
4425 	pr_debug("--- raid5d inactive\n");
4429 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4431 	raid5_conf_t *conf = mddev_to_conf(mddev);
4433 		return sprintf(page, "%d\n", conf->max_nr_stripes);
4439 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4441 	raid5_conf_t *conf = mddev_to_conf(mddev);
4445 	if (len >= PAGE_SIZE)
4450 	if (strict_strtoul(page, 10, &new))
4452 	if (new <= 16 || new > 32768)
4454 	while (new < conf->max_nr_stripes) {
4455 		if (drop_one_stripe(conf))
4456 			conf->max_nr_stripes--;
4460 	err = md_allow_write(mddev);
4463 	while (new > conf->max_nr_stripes) {
4464 		if (grow_one_stripe(conf))
4465 			conf->max_nr_stripes++;
4471 static struct md_sysfs_entry
4472 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4473 				raid5_show_stripe_cache_size,
4474 				raid5_store_stripe_cache_size);
4477 raid5_show_preread_threshold(mddev_t *mddev, char *page)
4479 	raid5_conf_t *conf = mddev_to_conf(mddev);
4481 		return sprintf(page, "%d\n", conf->bypass_threshold);
4487 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4489 	raid5_conf_t *conf = mddev_to_conf(mddev);
4491 	if (len >= PAGE_SIZE)
4496 	if (strict_strtoul(page, 10, &new))
4498 	if (new > conf->max_nr_stripes)
4500 	conf->bypass_threshold = new;
4504 static struct md_sysfs_entry
4505 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4507 					raid5_show_preread_threshold,
4508 					raid5_store_preread_threshold);
4511 stripe_cache_active_show(mddev_t *mddev, char *page)
4513 	raid5_conf_t *conf = mddev_to_conf(mddev);
4515 		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4520 static struct md_sysfs_entry
4521 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4523 static struct attribute *raid5_attrs[] = {
4524 	&raid5_stripecache_size.attr,
4525 	&raid5_stripecache_active.attr,
4526 	&raid5_preread_bypass_threshold.attr,
4529 static struct attribute_group raid5_attrs_group = {
4531 	.attrs = raid5_attrs,
4535 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4537 	raid5_conf_t *conf = mddev_to_conf(mddev);
4540 		sectors = mddev->dev_sectors;
4542 		/* size is defined by the smallest of previous and new size */
4543 		if (conf->raid_disks < conf->previous_raid_disks)
4544 			raid_disks = conf->raid_disks;
4546 			raid_disks = conf->previous_raid_disks;
4549 	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
4550 	sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
4551 	return sectors * (raid_disks - conf->max_degraded);
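	/*
	 * Illustrative numbers (editor's example, not part of the original
	 * source): with 1000000-sector members, a 64K (128-sector) chunk
	 * and 5 devices in RAID-5 (max_degraded = 1), the masks above round
	 * each member down to 999936 sectors, so the array exports
	 * 999936 * (5 - 1) = 3999744 sectors of data capacity.
	 */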
4554 static void raid5_free_percpu(raid5_conf_t *conf)
4556 	struct raid5_percpu *percpu;
4563 	for_each_possible_cpu(cpu) {
4564 		percpu = per_cpu_ptr(conf->percpu, cpu);
4565 		safe_put_page(percpu->spare_page);
4566 		kfree(percpu->scribble);
4568 #ifdef CONFIG_HOTPLUG_CPU
4569 	unregister_cpu_notifier(&conf->cpu_notify);
4573 	free_percpu(conf->percpu);
4576 static void free_conf(raid5_conf_t *conf)
4578 	shrink_stripes(conf);
4579 	raid5_free_percpu(conf);
4581 	kfree(conf->stripe_hashtbl);
4585 #ifdef CONFIG_HOTPLUG_CPU
4586 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4589 	raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4590 	long cpu = (long)hcpu;
4591 	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4594 	case CPU_UP_PREPARE:
4595 	case CPU_UP_PREPARE_FROZEN:
4596 		if (conf->level == 6 && !percpu->spare_page)
4597 			percpu->spare_page = alloc_page(GFP_KERNEL);
4598 		if (!percpu->scribble)
4599 			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4601 		if (!percpu->scribble ||
4602 		    (conf->level == 6 && !percpu->spare_page)) {
4603 			safe_put_page(percpu->spare_page);
4604 			kfree(percpu->scribble);
4605 			pr_err("%s: failed memory allocation for cpu%ld\n",
4611 	case CPU_DEAD_FROZEN:
4612 		safe_put_page(percpu->spare_page);
4613 		kfree(percpu->scribble);
4614 		percpu->spare_page = NULL;
4615 		percpu->scribble = NULL;
4624 static int raid5_alloc_percpu(raid5_conf_t *conf)
4627 	struct page *spare_page;
4628 	struct raid5_percpu *allcpus;
4632 	allcpus = alloc_percpu(struct raid5_percpu);
4635 	conf->percpu = allcpus;
4639 	for_each_present_cpu(cpu) {
4640 		if (conf->level == 6) {
4641 			spare_page = alloc_page(GFP_KERNEL);
4646 			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4648 		scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
4653 		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4655 #ifdef CONFIG_HOTPLUG_CPU
4656 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
4657 	conf->cpu_notify.priority = 0;
4659 	err = register_cpu_notifier(&conf->cpu_notify);
4666 static raid5_conf_t *setup_conf(mddev_t *mddev)
4669 	int raid_disk, memory;
4671 	struct disk_info *disk;
4673 	if (mddev->new_level != 5
4674 	    && mddev->new_level != 4
4675 	    && mddev->new_level != 6) {
4676 		printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
4677 		       mdname(mddev), mddev->new_level);
4678 		return ERR_PTR(-EIO);
4680 	if ((mddev->new_level == 5
4681 	     && !algorithm_valid_raid5(mddev->new_layout)) ||
4682 	    (mddev->new_level == 6
4683 	     && !algorithm_valid_raid6(mddev->new_layout))) {
4684 		printk(KERN_ERR "raid5: %s: layout %d not supported\n",
4685 		       mdname(mddev), mddev->new_layout);
4686 		return ERR_PTR(-EIO);
4688 	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4689 		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
4690 		       mdname(mddev), mddev->raid_disks);
4691 		return ERR_PTR(-EINVAL);
4694 	if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) {
4695 		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4696 		       mddev->new_chunk, mdname(mddev));
4697 		return ERR_PTR(-EINVAL);
4700 	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4704 	conf->raid_disks = mddev->raid_disks;
4705 	conf->scribble_len = scribble_len(conf->raid_disks);
4706 	if (mddev->reshape_position == MaxSector)
4707 		conf->previous_raid_disks = mddev->raid_disks;
4709 		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4711 	conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
4716 	conf->mddev = mddev;
4718 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4721 	conf->level = mddev->new_level;
4722 	if (raid5_alloc_percpu(conf) != 0)
4725 	spin_lock_init(&conf->device_lock);
4726 	init_waitqueue_head(&conf->wait_for_stripe);
4727 	init_waitqueue_head(&conf->wait_for_overlap);
4728 	INIT_LIST_HEAD(&conf->handle_list);
4729 	INIT_LIST_HEAD(&conf->hold_list);
4730 	INIT_LIST_HEAD(&conf->delayed_list);
4731 	INIT_LIST_HEAD(&conf->bitmap_list);
4732 	INIT_LIST_HEAD(&conf->inactive_list);
4733 	atomic_set(&conf->active_stripes, 0);
4734 	atomic_set(&conf->preread_active_stripes, 0);
4735 	atomic_set(&conf->active_aligned_reads, 0);
4736 	conf->bypass_threshold = BYPASS_THRESHOLD;
4738 	pr_debug("raid5: run(%s) called.\n", mdname(mddev));
4740 	list_for_each_entry(rdev, &mddev->disks, same_set) {
4741 		raid_disk = rdev->raid_disk;
4742 		if (raid_disk >= conf->raid_disks
4745 		disk = conf->disks + raid_disk;
4749 		if (test_bit(In_sync, &rdev->flags)) {
4750 			char b[BDEVNAME_SIZE];
4751 			printk(KERN_INFO "raid5: device %s operational as raid"
4752 			       " disk %d\n", bdevname(rdev->bdev,b),
4755 			/* Cannot rely on bitmap to complete recovery */
4759 	conf->chunk_size = mddev->new_chunk;
4760 	if (conf->level == 6)
4761 		conf->max_degraded = 2;
4763 		conf->max_degraded = 1;
4764 	conf->algorithm = mddev->new_layout;
4765 	conf->max_nr_stripes = NR_STRIPES;
4766 	conf->reshape_progress = mddev->reshape_position;
4767 	if (conf->reshape_progress != MaxSector) {
4768 		conf->prev_chunk = mddev->chunk_size;
4769 		conf->prev_algo = mddev->layout;
4772 	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4773 		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4774 	if (grow_stripes(conf, conf->max_nr_stripes)) {
4776 		       "raid5: couldn't allocate %dkB for buffers\n", memory);
4779 		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
4780 		       memory, mdname(mddev));
4782 	conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
4783 	if (!conf->thread) {
4785 		       "raid5: couldn't allocate thread for %s\n",
4795 	return ERR_PTR(-EIO);
4797 	return ERR_PTR(-ENOMEM);
4800 static int run(mddev_t *mddev)
4803 	int working_disks = 0;
4806 	if (mddev->reshape_position != MaxSector) {
4807 		/* Check that we can continue the reshape.
4808 		 * Currently only disks can change, it must
4809 		 * increase, and we must be past the point where
4810 		 * a stripe over-writes itself
4812 		sector_t here_new, here_old;
4814 		int max_degraded = (mddev->level == 6 ? 2 : 1);
4816 		if (mddev->new_level != mddev->level) {
4817 			printk(KERN_ERR "raid5: %s: unsupported reshape "
4818 			       "required - aborting.\n",
4822 		old_disks = mddev->raid_disks - mddev->delta_disks;
4823 		/* reshape_position must be on a new-stripe boundary, and one
4824 		 * further up in new geometry must map after here in old
4827 		here_new = mddev->reshape_position;
4828 		if (sector_div(here_new, (mddev->new_chunk>>9)*
4829 			       (mddev->raid_disks - max_degraded))) {
4830 			printk(KERN_ERR "raid5: reshape_position not "
4831 			       "on a stripe boundary\n");
4834 		/* here_new is the stripe we will write to */
4835 		here_old = mddev->reshape_position;
4836 		sector_div(here_old, (mddev->chunk_size>>9)*
4837 			   (old_disks-max_degraded));
4838 		/* here_old is the first stripe that we might need to read
4840 		if (here_new >= here_old) {
4841 			/* Reading from the same stripe as writing to - bad */
4842 			printk(KERN_ERR "raid5: reshape_position too early for "
4843 			       "auto-recovery - aborting.\n");
4846 		printk(KERN_INFO "raid5: reshape will continue\n");
4847 		/* OK, we should be able to continue; */
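		/*
		 * Editor's example (illustrative values, not part of the
		 * original source): growing a 64K-chunk RAID-5 from 4 to 5
		 * devices gives old_disks = 4, an old stripe of
		 * 128 * 3 = 384 sectors and a new stripe of 128 * 4 = 512
		 * sectors.  reshape_position = 107520 divides evenly by 512,
		 * here_new = 210 and here_old = 280, so here_new < here_old
		 * and the reshape can continue.  A reshape_position of 0
		 * would give here_new == here_old and be rejected above as
		 * too early for auto-recovery.
		 */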
4849 		BUG_ON(mddev->level != mddev->new_level);
4850 		BUG_ON(mddev->layout != mddev->new_layout);
4851 		BUG_ON(mddev->chunk_size != mddev->new_chunk);
4852 		BUG_ON(mddev->delta_disks != 0);
4855 	if (mddev->private == NULL)
4856 		conf = setup_conf(mddev);
4858 		conf = mddev->private;
4861 		return PTR_ERR(conf);
4863 	mddev->thread = conf->thread;
4864 	conf->thread = NULL;
4865 	mddev->private = conf;
4868 	 * 0 for a fully functional array, 1 or 2 for a degraded array.
4870 	list_for_each_entry(rdev, &mddev->disks, same_set)
4871 		if (rdev->raid_disk >= 0 &&
4872 		    test_bit(In_sync, &rdev->flags))
4875 	mddev->degraded = conf->raid_disks - working_disks;
4877 	if (mddev->degraded > conf->max_degraded) {
4878 		printk(KERN_ERR "raid5: not enough operational devices for %s"
4879 		       " (%d/%d failed)\n",
4880 		       mdname(mddev), mddev->degraded, conf->raid_disks);
4884 	/* device size must be a multiple of chunk size */
4885 	mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
4886 	mddev->resync_max_sectors = mddev->dev_sectors;
4888 	if (mddev->degraded > 0 &&
4889 	    mddev->recovery_cp != MaxSector) {
4890 		if (mddev->ok_start_degraded)
4892 			       "raid5: starting dirty degraded array: %s"
4893 			       "- data corruption possible.\n",
4897 			       "raid5: cannot start dirty degraded array for %s\n",
4903 	if (mddev->degraded == 0)
4904 		printk("raid5: raid level %d set %s active with %d out of %d"
4905 		       " devices, algorithm %d\n", conf->level, mdname(mddev),
4906 		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4909 		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
4910 		       " out of %d devices, algorithm %d\n", conf->level,
4911 		       mdname(mddev), mddev->raid_disks - mddev->degraded,
4912 		       mddev->raid_disks, mddev->new_layout);
4914 	print_raid5_conf(conf);
4916 	if (conf->reshape_progress != MaxSector) {
4917 		printk("...ok start reshape thread\n");
4918 		conf->reshape_safe = conf->reshape_progress;
4919 		atomic_set(&conf->reshape_stripes, 0);
4920 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4921 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4922 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4923 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4924 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4928 	/* read-ahead size must cover two whole stripes, which is
4929 	 * 2 * (datadisks) * chunksize, where datadisks is the number of data devices
4932 		int data_disks = conf->previous_raid_disks - conf->max_degraded;
4933 		int stripe = data_disks *
4934 			(mddev->chunk_size / PAGE_SIZE);
4935 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4936 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
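		/*
		 * Illustrative numbers (editor's example, not part of the
		 * original source): with 4K pages, a 64K chunk and 5 previous
		 * devices (max_degraded = 1), data_disks = 4 and
		 * stripe = 4 * (65536 / 4096) = 64 pages, so ra_pages is
		 * raised to at least 128 pages (512K), i.e. enough read-ahead
		 * to cover two full stripes.
		 */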
4939 	/* Ok, everything is just fine now */
4940 	if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4942 		       "raid5: failed to create sysfs attributes for %s\n",
4945 	mddev->queue->queue_lock = &conf->device_lock;
4947 	mddev->queue->unplug_fn = raid5_unplug_device;
4948 	mddev->queue->backing_dev_info.congested_data = mddev;
4949 	mddev->queue->backing_dev_info.congested_fn = raid5_congested;
4951 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
4953 	blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4957 	md_unregister_thread(mddev->thread);
4958 	mddev->thread = NULL;
4960 	print_raid5_conf(conf);
4963 	mddev->private = NULL;
4964 	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
4970 static int stop(mddev_t *mddev)
4972 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4974 	md_unregister_thread(mddev->thread);
4975 	mddev->thread = NULL;
4976 	mddev->queue->backing_dev_info.congested_fn = NULL;
4977 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
4978 	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
4980 	mddev->private = NULL;
4985 static void print_sh(struct seq_file *seq, struct stripe_head *sh)
4989 	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4990 		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4991 	seq_printf(seq, "sh %llu, count %d.\n",
4992 		   (unsigned long long)sh->sector, atomic_read(&sh->count));
4993 	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
4994 	for (i = 0; i < sh->disks; i++) {
4995 		seq_printf(seq, "(cache%d: %p %ld) ",
4996 			   i, sh->dev[i].page, sh->dev[i].flags);
4998 	seq_printf(seq, "\n");
5001 static void printall(struct seq_file *seq, raid5_conf_t *conf)
5003 	struct stripe_head *sh;
5004 	struct hlist_node *hn;
5007 	spin_lock_irq(&conf->device_lock);
5008 	for (i = 0; i < NR_HASH; i++) {
5009 		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
5010 			if (sh->raid_conf != conf)
5015 	spin_unlock_irq(&conf->device_lock);
5019 static void status(struct seq_file *seq, mddev_t *mddev)
5021 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
5024 	seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
5025 	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5026 	for (i = 0; i < conf->raid_disks; i++)
5027 		seq_printf (seq, "%s",
5028 			    conf->disks[i].rdev &&
5029 			    test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
5030 	seq_printf (seq, "]");
5032 	seq_printf (seq, "\n");
5033 	printall(seq, conf);
5037 static void print_raid5_conf (raid5_conf_t *conf)
5040 	struct disk_info *tmp;
5042 	printk("RAID5 conf printout:\n");
5044 		printk("(conf==NULL)\n");
5047 	printk(" --- rd:%d wd:%d\n", conf->raid_disks,
5048 	       conf->raid_disks - conf->mddev->degraded);
5050 	for (i = 0; i < conf->raid_disks; i++) {
5051 		char b[BDEVNAME_SIZE];
5052 		tmp = conf->disks + i;
5054 			printk(" disk %d, o:%d, dev:%s\n",
5055 			       i, !test_bit(Faulty, &tmp->rdev->flags),
5056 			       bdevname(tmp->rdev->bdev,b));
5060 static int raid5_spare_active(mddev_t *mddev)
5063 	raid5_conf_t *conf = mddev->private;
5064 	struct disk_info *tmp;
5066 	for (i = 0; i < conf->raid_disks; i++) {
5067 		tmp = conf->disks + i;
5069 		    && !test_bit(Faulty, &tmp->rdev->flags)
5070 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5071 			unsigned long flags;
5072 			spin_lock_irqsave(&conf->device_lock, flags);
5074 			spin_unlock_irqrestore(&conf->device_lock, flags);
5077 	print_raid5_conf(conf);
5081 static int raid5_remove_disk(mddev_t *mddev, int number)
5083 	raid5_conf_t *conf = mddev->private;
5086 	struct disk_info *p = conf->disks + number;
5088 	print_raid5_conf(conf);
5091 		if (number >= conf->raid_disks &&
5092 		    conf->reshape_progress == MaxSector)
5093 			clear_bit(In_sync, &rdev->flags);
5095 		if (test_bit(In_sync, &rdev->flags) ||
5096 		    atomic_read(&rdev->nr_pending)) {
5100 		/* Only remove non-faulty devices if recovery
5103 		if (!test_bit(Faulty, &rdev->flags) &&
5104 		    mddev->degraded <= conf->max_degraded &&
5105 		    number < conf->raid_disks) {
5111 		if (atomic_read(&rdev->nr_pending)) {
5112 			/* lost the race, try later */
5119 	print_raid5_conf(conf);
5123 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5125 	raid5_conf_t *conf = mddev->private;
5128 	struct disk_info *p;
5130 	int last = conf->raid_disks - 1;
5132 	if (mddev->degraded > conf->max_degraded)
5133 		/* no point adding a device */
5136 	if (rdev->raid_disk >= 0)
5137 		first = last = rdev->raid_disk;
5140 	 * find the disk ... but prefer rdev->saved_raid_disk
5143 	if (rdev->saved_raid_disk >= 0 &&
5144 	    rdev->saved_raid_disk >= first &&
5145 	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
5146 		disk = rdev->saved_raid_disk;
5149 	for ( ; disk <= last; disk++)
5150 		if ((p=conf->disks + disk)->rdev == NULL) {
5151 			clear_bit(In_sync, &rdev->flags);
5152 			rdev->raid_disk = disk;
5154 			if (rdev->saved_raid_disk != disk)
5156 			rcu_assign_pointer(p->rdev, rdev);
5159 	print_raid5_conf(conf);
5163 static int raid5_resize(mddev_t *mddev, sector_t sectors)
5165 	/* no resync is happening, and there is enough space
5166 	 * on all devices, so we can resize.
5167 	 * We need to make sure resync covers any new space.
5168 	 * If the array is shrinking we should possibly wait until
5169 	 * any io in the removed space completes, but it hardly seems
5172 	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
5173 	md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5174 					       mddev->raid_disks));
5175 	if (mddev->array_sectors >
5176 	    raid5_size(mddev, sectors, mddev->raid_disks))
5178 	set_capacity(mddev->gendisk, mddev->array_sectors);
5180 	if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
5181 		mddev->recovery_cp = mddev->dev_sectors;
5182 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5184 	mddev->dev_sectors = sectors;
5185 	mddev->resync_max_sectors = sectors;
5189 static int raid5_check_reshape(mddev_t *mddev)
5191 	raid5_conf_t *conf = mddev_to_conf(mddev);
5193 	if (mddev->delta_disks == 0 &&
5194 	    mddev->new_layout == mddev->layout &&
5195 	    mddev->new_chunk == mddev->chunk_size)
5196 		return -EINVAL; /* nothing to do */
5198 	/* Cannot grow a bitmap yet */
5200 	if (mddev->degraded > conf->max_degraded)
5202 	if (mddev->delta_disks < 0) {
5203 		/* We might be able to shrink, but the devices must
5204 		 * be made bigger first.
5205 		 * For raid6, 4 is the minimum size.
5206 		 * Otherwise 2 is the minimum
5209 		if (mddev->level == 6)
5211 		if (mddev->raid_disks + mddev->delta_disks < min)
5215 	/* Can only proceed if there are plenty of stripe_heads.
5216 	 * We need a minimum of one full stripe, and for sensible progress
5217 	 * it is best to have about 4 times that.
5218 	 * If we require 4 times, then the default 256 4K stripe_heads will
5219 	 * allow for chunk sizes up to 256K, which is probably OK.
5220 	 * If the chunk size is greater, user-space should request more
5221 	 * stripe_heads first.
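	/*
	 * Illustrative numbers (editor's example, not part of the original
	 * source): with 4K STRIPE_SIZE and the default 256 stripe_heads, a
	 * 256K chunk needs (262144 / 4096) * 4 = 256 stripe_heads, so the
	 * check below just passes; a 512K chunk would need 512, and the
	 * reshape is refused until the stripe_cache_size sysfs attribute is
	 * raised.
	 */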
5223 	if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
5224 	    (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
5225 		printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
5226 		       (max(mddev->chunk_size, mddev->new_chunk)
5231 	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5234 static int raid5_start_reshape(mddev_t *mddev)
5236 	raid5_conf_t *conf = mddev_to_conf(mddev);
5239 	int added_devices = 0;
5240 	unsigned long flags;
5242 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5245 	list_for_each_entry(rdev, &mddev->disks, same_set)
5246 		if (rdev->raid_disk < 0 &&
5247 		    !test_bit(Faulty, &rdev->flags))
5250 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5251 		/* Not enough devices even to make a degraded array
5256 	/* Refuse to reduce size of the array.  Any reductions in
5257 	 * array size must be through explicit setting of array_size
5260 	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5261 	    < mddev->array_sectors) {
5262 		printk(KERN_ERR "md: %s: array size must be reduced "
5263 		       "before number of disks\n", mdname(mddev));
5267 	atomic_set(&conf->reshape_stripes, 0);
5268 	spin_lock_irq(&conf->device_lock);
5269 	conf->previous_raid_disks = conf->raid_disks;
5270 	conf->raid_disks += mddev->delta_disks;
5271 	conf->prev_chunk = conf->chunk_size;
5272 	conf->chunk_size = mddev->new_chunk;
5273 	conf->prev_algo = conf->algorithm;
5274 	conf->algorithm = mddev->new_layout;
5275 	if (mddev->delta_disks < 0)
5276 		conf->reshape_progress = raid5_size(mddev, 0, 0);
5278 		conf->reshape_progress = 0;
5279 	conf->reshape_safe = conf->reshape_progress;
5281 	spin_unlock_irq(&conf->device_lock);
5283 	/* Add some new drives, as many as will fit.
5284 	 * We know there are enough to make the newly sized array work.
5286 	list_for_each_entry(rdev, &mddev->disks, same_set)
5287 		if (rdev->raid_disk < 0 &&
5288 		    !test_bit(Faulty, &rdev->flags)) {
5289 			if (raid5_add_disk(mddev, rdev) == 0) {
5291 				set_bit(In_sync, &rdev->flags);
5293 				rdev->recovery_offset = 0;
5294 				sprintf(nm, "rd%d", rdev->raid_disk);
5295 				if (sysfs_create_link(&mddev->kobj,
5298 					       "raid5: failed to create "
5299 					       " link %s for %s\n",
5305 	if (mddev->delta_disks > 0) {
5306 		spin_lock_irqsave(&conf->device_lock, flags);
5307 		mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
5309 		spin_unlock_irqrestore(&conf->device_lock, flags);
5311 	mddev->raid_disks = conf->raid_disks;
5312 	mddev->reshape_position = 0;
5313 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
5315 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5316 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5317 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5318 	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5319 	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5321 	if (!mddev->sync_thread) {
5322 		mddev->recovery = 0;
5323 		spin_lock_irq(&conf->device_lock);
5324 		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5325 		conf->reshape_progress = MaxSector;
5326 		spin_unlock_irq(&conf->device_lock);
5329 	conf->reshape_checkpoint = jiffies;
5330 	md_wakeup_thread(mddev->sync_thread);
5331 	md_new_event(mddev);
5335 /* This is called from the reshape thread and should make any
5336  * changes needed in 'conf'
5338 static void end_reshape(raid5_conf_t *conf)
5341 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5343 		spin_lock_irq(&conf->device_lock);
5344 		conf->previous_raid_disks = conf->raid_disks;
5345 		conf->reshape_progress = MaxSector;
5346 		spin_unlock_irq(&conf->device_lock);
5347 		wake_up(&conf->wait_for_overlap);
5349 		/* read-ahead size must cover two whole stripes, which is
5350 		 * 2 * (datadisks) * chunksize, where datadisks is the number of data devices
5353 		int data_disks = conf->raid_disks - conf->max_degraded;
5354 		int stripe = data_disks * (conf->chunk_size
5356 		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5357 			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5362 /* This is called from the raid5d thread with mddev_lock held.
5363  * It makes config changes to the device.
5365 static void raid5_finish_reshape(mddev_t *mddev)
5367 	struct block_device *bdev;
5368 	raid5_conf_t *conf = mddev_to_conf(mddev);
5370 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5372 		if (mddev->delta_disks > 0) {
5373 			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5374 			set_capacity(mddev->gendisk, mddev->array_sectors);
5377 			bdev = bdget_disk(mddev->gendisk, 0);
5379 				mutex_lock(&bdev->bd_inode->i_mutex);
5380 				i_size_write(bdev->bd_inode,
5381 					     (loff_t)mddev->array_sectors << 9);
5382 				mutex_unlock(&bdev->bd_inode->i_mutex);
5387 			mddev->degraded = conf->raid_disks;
5388 			for (d = 0; d < conf->raid_disks; d++)
5389 				if (conf->disks[d].rdev &&
5391 				    &conf->disks[d].rdev->flags))
5393 			for (d = conf->raid_disks;
5394 			     d < conf->raid_disks - mddev->delta_disks;
5396 				raid5_remove_disk(mddev, d);
5398 		mddev->layout = conf->algorithm;
5399 		mddev->chunk_size = conf->chunk_size;
5400 		mddev->reshape_position = MaxSector;
5401 		mddev->delta_disks = 0;
5405 static void raid5_quiesce(mddev_t *mddev, int state)
5407 	raid5_conf_t *conf = mddev_to_conf(mddev);
5410 	case 2: /* resume for a suspend */
5411 		wake_up(&conf->wait_for_overlap);
5414 	case 1: /* stop all writes */
5415 		spin_lock_irq(&conf->device_lock);
5417 		wait_event_lock_irq(conf->wait_for_stripe,
5418 				    atomic_read(&conf->active_stripes) == 0 &&
5419 				    atomic_read(&conf->active_aligned_reads) == 0,
5420 				    conf->device_lock, /* nothing */);
5421 		spin_unlock_irq(&conf->device_lock);
5424 	case 0: /* re-enable writes */
5425 		spin_lock_irq(&conf->device_lock);
5427 		wake_up(&conf->wait_for_stripe);
5428 		wake_up(&conf->wait_for_overlap);
5429 		spin_unlock_irq(&conf->device_lock);
5435 static void *raid5_takeover_raid1(mddev_t *mddev)
5439 	if (mddev->raid_disks != 2 ||
5440 	    mddev->degraded > 1)
5441 		return ERR_PTR(-EINVAL);
5443 	/* Should check if there are write-behind devices? */
5445 	chunksect = 64*2; /* 64K by default */
5447 	/* The array must be an exact multiple of chunksize */
5448 	while (chunksect && (mddev->array_sectors & (chunksect-1)))
5451 	if ((chunksect<<9) < STRIPE_SIZE)
5452 		/* array size does not allow a suitable chunk size */
5453 		return ERR_PTR(-EINVAL);
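	/*
	 * Editor's example (illustrative, not part of the original source):
	 * a 1000448-sector raid1 is an exact multiple of 128 sectors, so the
	 * full 64K chunk is kept.  A 1000005-sector raid1 is odd, the loop
	 * above halves chunksect all the way down to 1, and 1 << 9 = 512
	 * bytes is below STRIPE_SIZE, so the takeover is refused.
	 */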
5455 	mddev->new_level = 5;
5456 	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5457 	mddev->new_chunk = chunksect << 9;
5459 	return setup_conf(mddev);
5462 static void *raid5_takeover_raid6(mddev_t *mddev)
5466 	switch (mddev->layout) {
5467 	case ALGORITHM_LEFT_ASYMMETRIC_6:
5468 		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5470 	case ALGORITHM_RIGHT_ASYMMETRIC_6:
5471 		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5473 	case ALGORITHM_LEFT_SYMMETRIC_6:
5474 		new_layout = ALGORITHM_LEFT_SYMMETRIC;
5476 	case ALGORITHM_RIGHT_SYMMETRIC_6:
5477 		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5479 	case ALGORITHM_PARITY_0_6:
5480 		new_layout = ALGORITHM_PARITY_0;
5482 	case ALGORITHM_PARITY_N:
5483 		new_layout = ALGORITHM_PARITY_N;
5486 		return ERR_PTR(-EINVAL);
5488 	mddev->new_level = 5;
5489 	mddev->new_layout = new_layout;
5490 	mddev->delta_disks = -1;
5491 	mddev->raid_disks -= 1;
5492 	return setup_conf(mddev);
5496 static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5498 	/* For a 2-drive array, the layout and chunk size can be changed
5499 	 * immediately as no restriping is needed.
5500 	 * For larger arrays we record the new value - after validation
5501 	 * to be used by a reshape pass.
5503 	raid5_conf_t *conf = mddev_to_conf(mddev);
5505 	if (new_layout >= 0 && !algorithm_valid_raid5(new_layout))
5507 	if (new_chunk > 0) {
5508 		if (new_chunk & (new_chunk-1))
5509 			/* not a power of 2 */
5511 		if (new_chunk < PAGE_SIZE)
5513 		if (mddev->array_sectors & ((new_chunk>>9)-1))
5514 			/* not factor of array size */
5518 	/* They look valid */
5520 	if (mddev->raid_disks == 2) {
5522 		if (new_layout >= 0) {
5523 			conf->algorithm = new_layout;
5524 			mddev->layout = mddev->new_layout = new_layout;
5526 		if (new_chunk > 0) {
5527 			conf->chunk_size = new_chunk;
5528 			mddev->chunk_size = mddev->new_chunk = new_chunk;
5530 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
5531 		md_wakeup_thread(mddev->thread);
5533 	if (new_layout >= 0)
5534 		mddev->new_layout = new_layout;
5536 		mddev->new_chunk = new_chunk;
5541 static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5543 	if (new_layout >= 0 && !algorithm_valid_raid6(new_layout))
5545 	if (new_chunk > 0) {
5546 		if (new_chunk & (new_chunk-1))
5547 			/* not a power of 2 */
5549 		if (new_chunk < PAGE_SIZE)
5551 		if (mddev->array_sectors & ((new_chunk>>9)-1))
5552 			/* not factor of array size */
5556 	/* They look valid */
5558 	if (new_layout >= 0)
5559 		mddev->new_layout = new_layout;
5561 		mddev->new_chunk = new_chunk;
5566 static void *raid5_takeover(mddev_t *mddev)
5568 	/* raid5 can take over:
5569 	 *  raid0 - if all devices are the same - make it a raid4 layout
5570 	 *  raid1 - if there are two drives.  We need to know the chunk size
5571 	 *  raid4 - trivial - just use a raid4 layout.
5572 	 *  raid6 - Providing it is a *_6 layout
5574 	 * For now, just do raid1
5577 	if (mddev->level == 1)
5578 		return raid5_takeover_raid1(mddev);
5579 	if (mddev->level == 4) {
5580 		mddev->new_layout = ALGORITHM_PARITY_N;
5581 		mddev->new_level = 5;
5582 		return setup_conf(mddev);
5584 	if (mddev->level == 6)
5585 		return raid5_takeover_raid6(mddev);
5587 	return ERR_PTR(-EINVAL);
5591 static struct mdk_personality raid5_personality;
5593 static void *raid6_takeover(mddev_t *mddev)
5595 	/* Currently can only take over a raid5.  We map the
5596 	 * personality to an equivalent raid6 personality
5597 	 * with the Q block at the end.
5601 	if (mddev->pers != &raid5_personality)
5602 		return ERR_PTR(-EINVAL);
5603 	if (mddev->degraded > 1)
5604 		return ERR_PTR(-EINVAL);
5605 	if (mddev->raid_disks > 253)
5606 		return ERR_PTR(-EINVAL);
5607 	if (mddev->raid_disks < 3)
5608 		return ERR_PTR(-EINVAL);
5610 	switch (mddev->layout) {
5611 	case ALGORITHM_LEFT_ASYMMETRIC:
5612 		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5614 	case ALGORITHM_RIGHT_ASYMMETRIC:
5615 		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5617 	case ALGORITHM_LEFT_SYMMETRIC:
5618 		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5620 	case ALGORITHM_RIGHT_SYMMETRIC:
5621 		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5623 	case ALGORITHM_PARITY_0:
5624 		new_layout = ALGORITHM_PARITY_0_6;
5626 	case ALGORITHM_PARITY_N:
5627 		new_layout = ALGORITHM_PARITY_N;
5630 		return ERR_PTR(-EINVAL);
5632 	mddev->new_level = 6;
5633 	mddev->new_layout = new_layout;
5634 	mddev->delta_disks = 1;
5635 	mddev->raid_disks += 1;
5636 	return setup_conf(mddev);
5640 static struct mdk_personality raid6_personality =
5644 	.owner		= THIS_MODULE,
5645 	.make_request	= make_request,
5649 	.error_handler	= error,
5650 	.hot_add_disk	= raid5_add_disk,
5651 	.hot_remove_disk= raid5_remove_disk,
5652 	.spare_active	= raid5_spare_active,
5653 	.sync_request	= sync_request,
5654 	.resize		= raid5_resize,
5656 	.check_reshape	= raid5_check_reshape,
5657 	.start_reshape  = raid5_start_reshape,
5658 	.finish_reshape = raid5_finish_reshape,
5659 	.quiesce	= raid5_quiesce,
5660 	.takeover	= raid6_takeover,
5661 	.reconfig	= raid6_reconfig,
5663 static struct mdk_personality raid5_personality =
5667 	.owner		= THIS_MODULE,
5668 	.make_request	= make_request,
5672 	.error_handler	= error,
5673 	.hot_add_disk	= raid5_add_disk,
5674 	.hot_remove_disk= raid5_remove_disk,
5675 	.spare_active	= raid5_spare_active,
5676 	.sync_request	= sync_request,
5677 	.resize		= raid5_resize,
5679 	.check_reshape	= raid5_check_reshape,
5680 	.start_reshape  = raid5_start_reshape,
5681 	.finish_reshape = raid5_finish_reshape,
5682 	.quiesce	= raid5_quiesce,
5683 	.takeover	= raid5_takeover,
5684 	.reconfig	= raid5_reconfig,
5687 static struct mdk_personality raid4_personality =
5691 	.owner		= THIS_MODULE,
5692 	.make_request	= make_request,
5696 	.error_handler	= error,
5697 	.hot_add_disk	= raid5_add_disk,
5698 	.hot_remove_disk= raid5_remove_disk,
5699 	.spare_active	= raid5_spare_active,
5700 	.sync_request	= sync_request,
5701 	.resize		= raid5_resize,
5703 	.check_reshape	= raid5_check_reshape,
5704 	.start_reshape	= raid5_start_reshape,
5705 	.finish_reshape = raid5_finish_reshape,
5706 	.quiesce	= raid5_quiesce,
5709 static int __init raid5_init(void)
5711 	register_md_personality(&raid6_personality);
5712 	register_md_personality(&raid5_personality);
5713 	register_md_personality(&raid4_personality);
5717 static void raid5_exit(void)
5719 	unregister_md_personality(&raid6_personality);
5720 	unregister_md_personality(&raid5_personality);
5721 	unregister_md_personality(&raid4_personality);
5724 module_init(raid5_init);
5725 module_exit(raid5_exit);
5726 MODULE_LICENSE("GPL");
5727 MODULE_ALIAS("md-personality-4"); /* RAID5 */
5728 MODULE_ALIAS("md-raid5");
5729 MODULE_ALIAS("md-raid4");
5730 MODULE_ALIAS("md-level-5");
5731 MODULE_ALIAS("md-level-4");
5732 MODULE_ALIAS("md-personality-8"); /* RAID6 */
5733 MODULE_ALIAS("md-raid6");
5734 MODULE_ALIAS("md-level-6");
5736 /* This used to be two separate modules, they were: */
5737 MODULE_ALIAS("raid5");
5738 MODULE_ALIAS("raid6");