/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change these limits via /proc/sys/dev/raid/speed_limit_{min,max}
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
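/*
 * Illustrative usage sketch (not part of the original file): the resync
 * code resolves the per-array override first, falling back to the
 * global sysctl:
 *
 *	int min = speed_min(mddev);	// per-array sync_speed_min if non-zero
 *	int max = speed_max(mddev);	// else sysctl_speed_limit_{min,max}
 *
 * From user space the same knobs are reachable as, e.g. for md0:
 *	echo 5000 > /proc/sys/dev/raid/speed_limit_min
 *	echo 5000 > /sys/block/md0/md/sync_speed_min
 */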
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables to iterate over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still owning a
 * reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
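/*
 * Illustrative usage sketch (not part of the original file): the macro
 * lets the loop body sleep, because each iteration holds a refcount on
 * the current mddev rather than the spinlock:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk("md: found %s\n", mdname(mddev));
 *
 * Breaking out of the loop early leaks the reference unless the caller
 * mddev_put()s the current mddev, as the comment above warns.
 */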
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	int rv;
	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();
	rv = mddev->pers->make_request(q, bio);
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}
static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	/* we now know that no code is executing in the personality module,
	 * except possibly the tail end of a ->bi_end_io function, but that
	 * is certain to complete before the module has a chance to get
	 * unloaded
	 */
}

static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
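/*
 * Illustrative usage sketch (not part of the original file): callers
 * bracket a reconfiguration with these helpers so md_make_request()
 * blocks new IO until the change is complete:
 *
 *	mddev_suspend(mddev);	// drain active_io, quiesce the personality
 *	...alter layout/personality state...
 *	mddev_resume(mddev);	// reopen the gate, wake waiters on sb_wait
 */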
int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);


static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->open_mutex);
	mutex_init(&new->reconfig_mutex);
	mutex_init(&new->bitmap_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	atomic_set(&new->active_io, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}
static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 * without the barrier.
	 */
	DEFINE_WAIT(wq);
	for (;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
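/*
 * Worked example (illustrative, not part of the original file):
 * folding 0x0003fffe gives (0xfffe + 0x0003) = 0x10001, and the second
 * step gives (0x0001 + 0x0001) = 0x0002.  A 32-bit sum is reduced to
 * 16 bits with end-around carry, which is why two folds are needed.
 */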
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32 *)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
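/*
 * Illustrative dispatch sketch (not part of the original file): callers
 * select a handler by the array's metadata major version and go through
 * the super_types[] table defined further below, honouring the
 * load_super return convention described above:
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 *	if (err < 0)
 *		goto abort;	// -EINVAL incompatible, or e.g. -EIO
 *	if (err == 1)
 *		refdev = rdev;	// rdev is newer: promote it to refdev
 */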
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_file && !mddev->bitmap_offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t *)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t *)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t *)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32 *)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16 *) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1 *)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1 *)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1 *)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1 *)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		if (rdev->recovery_offset > 0) {
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
			sb->recovery_offset =
				cpu_to_le64(rdev->recovery_offset);
		}
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	}
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we can not
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
		mdname(mddev));
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
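/*
 * Illustrative usage sketch (not part of the original file): per the
 * comment above, a personality would typically call this at the end of
 * its run() method so array start-up fails on mismatched profiles:
 *
 *	static int run(mddev_t *mddev)
 *	{
 *		...set up the personality...
 *		return md_integrity_register(mddev);
 *	}
 */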
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
	       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
	       "md:    Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
	        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	        rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t *)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	mddev->utime = get_seconds();
	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, an 'even' event must also go
			 * to spares. */
			if ((mddev->events&1)==0)
				nospares = 0;
		} else {
			/* otherwise an 'odd' event must go to spares */
			if ((mddev->events&1))
				nospares = 0;
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
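/*
 * Illustrative behaviour (not part of the original file):
 *
 *	cmd_match("faulty\n", "faulty")	-> 1  (trailing newline accepted)
 *	cmd_match("faulty",   "faulty")	-> 1
 *	cmd_match("fault",    "faulty")	-> 0  (a prefix is not a match)
 */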
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 *  insync - sets Insync providing device isn't active
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	}
	if (!err && rdev->sysfs_state)
		sysfs_notify_dirent(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
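/*
 * Illustrative userspace sketch of the state_store() interface above
 * (assumed paths and device names; compile with a normal libc, this is
 * not kernel code).  Failing a member the same way "mdadm --fail" would:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int fail_member(void)
{
	/* hypothetical array md0 with member sda1 */
	int fd = open("/sys/block/md0/md/dev-sda1/state", O_WRONLY);
	if (fd < 0)
		return -1;
	/* a trailing newline is fine, cmd_match() strips it */
	if (write(fd, "faulty\n", 7) != 7) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif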
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	char nm[20];
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		mdk_rdev_t *rdev2;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */
		if (rdev->raid_disk != -1)
			return -EBUSY;
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
			if (rdev2->raid_disk == slot)
				return -EEXIST;
		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		}
		sysfs_notify_dirent(rdev->sysfs_state);
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			printk(KERN_WARNING
			       "md: cannot register "
			       "%s for %s\n",
			       nm, mdname(rdev->mddev));
		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent(rdev->sysfs_state);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->sectors && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (strict_strtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}
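/*
 * Illustrative sketch of the two overflow checks above (editor's
 * example).  Block counts are in 1K units, sectors in 512-byte units,
 * hence the "* 2":
 */
#if 0
	sector_t s;
	/* 4 blocks -> 8 sectors */
	BUG_ON(strict_blocks_to_sectors("4", &s) != 0 || s != 8);
	/* 2^63 blocks: the top bit is set, doubling would wrap -> -EINVAL */
	BUG_ON(strict_blocks_to_sectors("9223372036854775808", &s) != -EINVAL);
	/* on a 32-bit sector_t, counts above 2^31 sectors truncate in the
	 * "new != blocks * 2" comparison and are rejected the same way */
#endif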
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	mddev_t *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (rdev->bdev->bd_inode->i_size >> 9) -
				rdev->data_offset;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->sectors, and if
		 * we have to change it back, we will have the lock again.
		 */
		mddev_t *mddev;
		int overlap = 0;
		struct list_head *tmp;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			mdk_rdev_t *rdev2;

			mddev_lock(mddev);
			list_for_each_entry(rdev2, &mddev->disks, same_set)
				if (test_bit(AllReserved, &rdev2->flags) ||
				    (rdev->bdev == rdev2->bdev &&
				     rdev != rdev2 &&
				     overlaps(rdev->data_offset, rdev->sectors,
					      rdev2->data_offset,
					      rdev2->sectors))) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself.
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
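/*
 * Illustrative sketch (hypothetical attribute, not in the driver): a
 * new per-rdev sysfs file only needs a show/store pair wrapped in an
 * rdev_sysfs_entry and a slot in rdev_default_attrs[]; locking and
 * dispatch are handled centrally by rdev_attr_show()/rdev_attr_store().
 */
#if 0
static ssize_t
nr_pending_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->nr_pending));
}
static struct rdev_sysfs_entry rdev_nr_pending =
__ATTR_RO(nr_pending);	/* then add &rdev_nr_pending.attr to the list */
#endif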
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */
static void analyze_sbs(mddev_t * mddev)
{
	int i;
	mdk_rdev_t *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->desc_nr >= mddev->max_disks ||
		    i > mddev->max_disks) {
			printk(KERN_WARNING
			       "md: %s: %s: only %d devices permitted\n",
			       mdname(mddev), bdevname(rdev->bdev, b),
			       mddev->max_disks);
			kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}
}
static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale=1;
	int dot=0;
	int i;
	unsigned long msec;
	char buf[30];

	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, sizeof(buf));
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot=1;
			buf[i] = 0;
		}
	}
	if (strict_strtoul(buf, 10, &msec) < 0)
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
		if (mddev->safemode_delay < old_delay)
			md_safemode_timeout((unsigned long)mddev);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
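/*
 * Illustrative sketch of safe_delay_store()'s fixed-point parse
 * (editor's example): writing "0.200" deletes the '.', shifts each
 * fractional digit down one slot and multiplies 'scale' by 10 per
 * digit, so strict_strtoul() sees "0200" with scale == 1000, giving
 * msec = (200 * 1000) / 1000 = 200, i.e. a 200 msec safemode delay.
 */
#if 0
	/* hypothetical array md0:
	 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
	 *   cat /sys/block/md0/md/safe_mode_delay    -> "0.200"
	 */
#endif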
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	char level[16];
	ssize_t rv = len;
	struct mdk_personality *pers;
	void *priv;
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL) {
		/* just setting the level before the array is run */
		if (len == 0)
			return 0;
		if (len >= sizeof(mddev->clevel))
			return -ENOSPC;
		strncpy(mddev->clevel, buf, len);
		if (mddev->clevel[len-1] == '\n')
			len--;
		mddev->clevel[len] = 0;
		mddev->level = LEVEL_NONE;
		return rv;
	}

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will access other array.
	 */
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;

	if (!mddev->pers->quiesce) {
		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
		       mdname(mddev), mddev->pers->name);
		return -EINVAL;
	}

	/* Now find the new personality */
	if (len == 0 || len >= sizeof(level))
		return -EINVAL;
	strncpy(level, buf, len);
	if (level[len-1] == '\n')
		len--;
	level[len] = 0;

	request_module("md-%s", level);
	spin_lock(&pers_lock);
	pers = find_pers(LEVEL_NONE, level);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		printk(KERN_WARNING "md: personality %s not loaded\n", level);
		return -EINVAL;
	}
	spin_unlock(&pers_lock);

	if (pers == mddev->pers) {
		/* Nothing to do! */
		module_put(pers->owner);
		return rv;
	}
	if (!pers->takeover) {
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
		       mdname(mddev), level);
		return -EINVAL;
	}

	/* ->takeover must set new_* and/or delta_disks
	 * if it succeeds, and may set them when it fails.
	 */
	priv = pers->takeover(mddev);
	if (IS_ERR(priv)) {
		mddev->new_level = mddev->level;
		mddev->new_layout = mddev->layout;
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->raid_disks -= mddev->delta_disks;
		mddev->delta_disks = 0;
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s would not accept array\n",
		       mdname(mddev), level);
		return PTR_ERR(priv);
	}

	/* Looks like we have a winner */
	mddev_suspend(mddev);
	mddev->pers->stop(mddev);
	module_put(mddev->pers->owner);
	/* Invalidate devices that are now superfluous */
	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= mddev->raid_disks) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	mddev->pers = pers;
	mddev->private = priv;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->delta_disks = 0;
	pers->run(mddev);
	mddev_resume(mddev);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_layout = n;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_layout = mddev->layout;
			return err;
		}
	} else {
		mddev->new_layout = n;
		if (mddev->reshape_position == MaxSector)
			mddev->layout = n;
	}
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_sectors != mddev->new_chunk_sectors)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_chunk_sectors << 9,
			       mddev->chunk_sectors << 9);
	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_chunk_sectors = n >> 9;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_chunk_sectors = mddev->chunk_sectors;
			return err;
		}
	} else {
		mddev->new_chunk_sectors = n >> 9;
		if (mddev->reshape_position == MaxSector)
			mddev->chunk_sectors = n >> 9;
	}
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	if (mddev->recovery_cp == MaxSector)
		return sprintf(page, "none\n");
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
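/*
 * Illustrative userspace sketch (assumed array name md0): the state
 * machine documented above is driven through a single text file.
 * Reading it returns one word from array_states[]; writing one of the
 * settable words requests the corresponding transition.
 */
#if 0
	/* cat  /sys/block/md0/md/array_state           -> e.g. "clean"
	 * echo readonly > /sys/block/md0/md/array_state
	 * echo active   > /sys/block/md0/md/array_state
	 */
#endif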
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(mddev_t * mddev, int ro, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);

static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->openers) > 0)
			return -EBUSY;
		err = do_md_stop(mddev, 0, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->openers) > 0)
				return -EBUSY;
			err = do_md_stop(mddev, 2, 0);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1, 0);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = do_md_stop(mddev, 1, 0);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					if (mddev->persistent)
						set_bit(MD_CHANGE_CLEAN,
							&mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			if (mddev->external)
				clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		sysfs_notify_dirent(mddev->sysfs_state);
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(mddev_t *mddev, sector_t num_sectors);

static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent(mddev->sysfs_action);
	return len;
}

static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
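/*
 * Illustrative userspace sketch of the sync_action interface above
 * (assumed array name md0): a "check" pass reads everything and counts
 * inconsistencies in resync_mismatches without rewriting anything;
 * "repair" also corrects them.
 */
#if 0
	/* echo check > /sys/block/md0/md/sync_action
	 * ... wait until action_show() reports "idle" again ...
	 * cat /sys/block/md0/md/mismatch_cnt
	 */
#endif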
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
	unsigned long resync, dt, db;

	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
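/*
 * Illustrative worked example (editor's note): sync_speed reports
 * sectors completed since the last mark, divided by elapsed seconds,
 * divided by 2 to convert 512-byte sectors to KB/sec.  E.g. with
 * db = 409600 sectors and dt = 10 seconds:
 *
 *	409600 / 10 / 2 = 20480 KB/sec
 */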
static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
	unsigned long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_sectors) {
		sector_t temp = min;
		if (sector_div(temp, mddev->chunk_sectors))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_sectors) {
			sector_t temp = max;
			if (sector_div(temp, mddev->chunk_sectors))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
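/*
 * Illustrative sketch (editor's example): both sync_min and sync_max
 * must be chunk-aligned, which sector_div() checks via the remainder.
 * For a 512K chunk (1024 sectors), 1048576 is accepted and 1048575
 * is not:
 */
#if 0
	/* echo 1048576 > /sys/block/md0/md/sync_max    # 1024 * 1024, ok
	 * echo 1048575 > /sys/block/md0/md/sync_max    # -EINVAL
	 */
#endif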
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if (new >= mddev->suspend_hi ||
	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
		mddev->suspend_lo = new;
		mddev->pers->quiesce(mddev, 2);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
		mddev->suspend_hi = new;
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
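/*
 * Illustrative userspace sketch (assumed array md0): suspend_lo and
 * suspend_hi fence off a sector range; the personality's quiesce()
 * method holds IO to [suspend_lo, suspend_hi) while userspace (e.g. an
 * external reshape helper) works on it.  Raising suspend_hi first and
 * then suspend_lo moves the window forward:
 */
#if 0
	/* echo 2048 > /sys/block/md0/md/suspend_hi
	 * echo 1024 > /sys/block/md0/md/suspend_lo
	 */
#endif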
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
static ssize_t
array_size_show(mddev_t *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	if (mddev->pers)
		revalidate_disk(mddev->gendisk);

	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_array_size.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};

static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev_lock(mddev);
	if (mddev->hold_active == UNTIL_IOCTL)
		mddev->hold_active = 0;
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static void mddev_delayed_delete(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, del_work);

	if (mddev->private == &md_redundancy_group) {
		sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
		if (mddev->sysfs_action)
			sysfs_put(mddev->sysfs_action);
		mddev->sysfs_action = NULL;
		mddev->private = NULL;
	}
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}
static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_scheduled_work();

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		mddev_t *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	/* Can be unlocked because the queue is new: no concurrency */
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);

	blk_queue_make_request(mddev->queue, md_make_request);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	add_disk(disk);
	mddev->gendisk = disk;
	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
		error = 0;
	}
 abort:
	mutex_unlock(&disks_mutex);
	if (!error) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	md_alloc(dev, NULL);
	return NULL;
}

static int add_named_array(const char *val, struct kernel_param *kp)
{
	/* val must be "md_*" where * is not all digits.
	 * We allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) != 0)
		return -EINVAL;
	return md_alloc(0, buf);
}
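/*
 * Illustrative sketch: add_named_array() is hooked up elsewhere in
 * this file as a writable module parameter (assumed here to carry the
 * mainline name "new_array").  Writing an "md_"-prefixed name then
 * allocates a named array:
 */
#if 0
	/* echo md_home > /sys/module/md_mod/parameters/new_array
	 * ls /sys/block/md_home/md/
	 */
#endif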
static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;
* mddev
)
4042 struct gendisk
*disk
;
4043 struct mdk_personality
*pers
;
4045 if (list_empty(&mddev
->disks
))
4046 /* cannot run an array with no devices.. */
4053 * Analyze all RAID superblock(s)
4055 if (!mddev
->raid_disks
) {
4056 if (!mddev
->persistent
)
4061 if (mddev
->level
!= LEVEL_NONE
)
4062 request_module("md-level-%d", mddev
->level
);
4063 else if (mddev
->clevel
[0])
4064 request_module("md-%s", mddev
->clevel
);
4067 * Drop all container device buffers, from now on
4068 * the only valid external interface is through the md
4071 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
4072 if (test_bit(Faulty
, &rdev
->flags
))
4074 sync_blockdev(rdev
->bdev
);
4075 invalidate_bdev(rdev
->bdev
);
4077 /* perform some consistency tests on the device.
4078 * We don't want the data to overlap the metadata,
4079 * Internal Bitmap issues have been handled elsewhere.
4081 if (rdev
->data_offset
< rdev
->sb_start
) {
4082 if (mddev
->dev_sectors
&&
4083 rdev
->data_offset
+ mddev
->dev_sectors
4085 printk("md: %s: data overlaps metadata\n",
4090 if (rdev
->sb_start
+ rdev
->sb_size
/512
4091 > rdev
->data_offset
) {
4092 printk("md: %s: metadata overlaps data\n",
4097 sysfs_notify_dirent(rdev
->sysfs_state
);
4100 md_probe(mddev
->unit
, NULL
, NULL
);
4101 disk
= mddev
->gendisk
;
4105 spin_lock(&pers_lock
);
4106 pers
= find_pers(mddev
->level
, mddev
->clevel
);
4107 if (!pers
|| !try_module_get(pers
->owner
)) {
4108 spin_unlock(&pers_lock
);
4109 if (mddev
->level
!= LEVEL_NONE
)
4110 printk(KERN_WARNING
"md: personality for level %d is not loaded!\n",
4113 printk(KERN_WARNING
"md: personality for level %s is not loaded!\n",
4118 spin_unlock(&pers_lock
);
4119 if (mddev
->level
!= pers
->level
) {
4120 mddev
->level
= pers
->level
;
4121 mddev
->new_level
= pers
->level
;
4123 strlcpy(mddev
->clevel
, pers
->name
, sizeof(mddev
->clevel
));
4125 if (mddev
->reshape_position
!= MaxSector
&&
4126 pers
->start_reshape
== NULL
) {
4127 /* This personality cannot handle reshaping... */
4129 module_put(pers
->owner
);
4133 if (pers
->sync_request
) {
4134 /* Warn if this is a potentially silly
4137 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
4141 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
4142 list_for_each_entry(rdev2
, &mddev
->disks
, same_set
) {
4144 rdev
->bdev
->bd_contains
==
4145 rdev2
->bdev
->bd_contains
) {
4147 "%s: WARNING: %s appears to be"
4148 " on the same physical disk as"
4151 bdevname(rdev
->bdev
,b
),
4152 bdevname(rdev2
->bdev
,b2
));
4159 "True protection against single-disk"
4160 " failure might be compromised.\n");
4163 mddev
->recovery
= 0;
4164 /* may be over-ridden by personality */
4165 mddev
->resync_max_sectors
= mddev
->dev_sectors
;
4167 mddev
->barriers_work
= 1;
4168 mddev
->ok_start_degraded
= start_dirty_degraded
;
4171 mddev
->ro
= 2; /* read-only, but switch on first write */
4173 err
= mddev
->pers
->run(mddev
);
4175 printk(KERN_ERR
"md: pers->run() failed ...\n");
4176 else if (mddev
->pers
->size(mddev
, 0, 0) < mddev
->array_sectors
) {
4177 WARN_ONCE(!mddev
->external_size
, "%s: default size too small,"
4178 " but 'external_size' not in effect?\n", __func__
);
4180 "md: invalid array_size %llu > default size %llu\n",
4181 (unsigned long long)mddev
->array_sectors
/ 2,
4182 (unsigned long long)mddev
->pers
->size(mddev
, 0, 0) / 2);
4184 mddev
->pers
->stop(mddev
);
4186 if (err
== 0 && mddev
->pers
->sync_request
) {
4187 err
= bitmap_create(mddev
);
4189 printk(KERN_ERR
"%s: failed to create bitmap (%d)\n",
4190 mdname(mddev
), err
);
4191 mddev
->pers
->stop(mddev
);
4195 module_put(mddev
->pers
->owner
);
4197 bitmap_destroy(mddev
);
4200 if (mddev
->pers
->sync_request
) {
4201 if (sysfs_create_group(&mddev
->kobj
, &md_redundancy_group
))
4203 "md: cannot register extra attributes for %s\n",
4205 mddev
->sysfs_action
= sysfs_get_dirent(mddev
->kobj
.sd
, "sync_action");
4206 } else if (mddev
->ro
== 2) /* auto-readonly not meaningful */
4209 atomic_set(&mddev
->writes_pending
,0);
4210 mddev
->safemode
= 0;
4211 mddev
->safemode_timer
.function
= md_safemode_timeout
;
4212 mddev
->safemode_timer
.data
= (unsigned long) mddev
;
4213 mddev
->safemode_delay
= (200 * HZ
)/1000 +1; /* 200 msec delay */
4216 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
4217 if (rdev
->raid_disk
>= 0) {
4219 sprintf(nm
, "rd%d", rdev
->raid_disk
);
4220 if (sysfs_create_link(&mddev
->kobj
, &rdev
->kobj
, nm
))
4221 printk("md: cannot register %s for %s\n",
4225 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
4228 md_update_sb(mddev
, 0);
4230 set_capacity(disk
, mddev
->array_sectors
);
4232 /* If there is a partially-recovered drive we need to
4233 * start recovery here. If we leave it to md_check_recovery,
4234 * it will remove the drives and not do the right thing
4236 if (mddev
->degraded
&& !mddev
->sync_thread
) {
4238 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
4239 if (rdev
->raid_disk
>= 0 &&
4240 !test_bit(In_sync
, &rdev
->flags
) &&
4241 !test_bit(Faulty
, &rdev
->flags
))
4242 /* complete an interrupted recovery */
4244 if (spares
&& mddev
->pers
->sync_request
) {
4245 mddev
->recovery
= 0;
4246 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
4247 mddev
->sync_thread
= md_register_thread(md_do_sync
,
4250 if (!mddev
->sync_thread
) {
4251 printk(KERN_ERR
"%s: could not start resync"
4254 /* leave the spares where they are, it shouldn't hurt */
4255 mddev
->recovery
= 0;
4259 md_wakeup_thread(mddev
->thread
);
4260 md_wakeup_thread(mddev
->sync_thread
); /* possibly kick off a reshape */
4262 revalidate_disk(mddev
->gendisk
);
4264 md_new_event(mddev
);
4265 sysfs_notify_dirent(mddev
->sysfs_state
);
4266 if (mddev
->sysfs_action
)
4267 sysfs_notify_dirent(mddev
->sysfs_action
);
4268 sysfs_notify(&mddev
->kobj
, NULL
, "degraded");
4269 kobject_uevent(&disk_to_dev(mddev
->gendisk
)->kobj
, KOBJ_CHANGE
);
static int restart_array(mddev_t *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
		mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent(mddev->sysfs_state);
	return 0;
}
/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

static void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
/* mode:
 *   0 - completely stop and dis-assemble array
 *   1 - switch to readonly
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
	int err = 0;
	struct gendisk *disk = mddev->gendisk;
	mdk_rdev_t *rdev;

	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > is_open) {
		printk("md: %s still in use.\n",mdname(mddev));
		err = -EBUSY;
	} else if (mddev->pers) {

		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
		}

		del_timer_sync(&mddev->safemode_timer);

		switch(mode) {
		case 1: /* readonly */
			err = -ENXIO;
			if (mddev->ro==1)
				goto out;
			mddev->ro = 1;
			break;
		case 0: /* disassemble */
		case 2: /* stop */
			bitmap_flush(mddev);
			md_super_wait(mddev);
			if (mddev->ro)
				set_disk_ro(disk, 0);

			mddev->pers->stop(mddev);
			mddev->queue->merge_bvec_fn = NULL;
			mddev->queue->unplug_fn = NULL;
			mddev->queue->backing_dev_info.congested_fn = NULL;
			module_put(mddev->pers->owner);
			if (mddev->pers->sync_request)
				mddev->private = &md_redundancy_group;
			mddev->pers = NULL;
			/* tell userspace to handle 'inactive' */
			sysfs_notify_dirent(mddev->sysfs_state);

			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
				}

			set_capacity(disk, 0);

			if (mddev->ro)
				mddev->ro = 0;
		}
		if (!mddev->in_sync || mddev->flags) {
			/* mark array as shutdown cleanly */
			mddev->in_sync = 1;
			md_update_sb(mddev, 1);
		}
		if (mode == 1)
			set_disk_ro(disk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		err = 0;
	}
out:
	mutex_unlock(&mddev->open_mutex);
	if (err)
		return err;
	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {

		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
		}
		mddev->bitmap_offset = 0;

		/* make sure all md_delayed_delete calls have finished */
		flush_scheduled_work();

		export_array(mddev);

		mddev->array_sectors = 0;
		mddev->external_size = 0;
		mddev->dev_sectors = 0;
		mddev->raid_disks = 0;
		mddev->recovery_cp = 0;
		mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
		mddev->reshape_position = MaxSector;
		mddev->external = 0;
		mddev->persistent = 0;
		mddev->level = LEVEL_NONE;
		mddev->clevel[0] = 0;
		mddev->flags = 0;
		mddev->ro = 0;
		mddev->metadata_type[0] = 0;
		mddev->chunk_sectors = 0;
		mddev->ctime = mddev->utime = 0;
		mddev->layout = 0;
		mddev->max_disks = 0;
		mddev->events = 0;
		mddev->delta_disks = 0;
		mddev->new_level = LEVEL_NONE;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		mddev->curr_resync = 0;
		mddev->resync_mismatches = 0;
		mddev->suspend_lo = mddev->suspend_hi = 0;
		mddev->sync_speed_min = mddev->sync_speed_max = 0;
		mddev->recovery = 0;
		mddev->in_sync = 0;
		mddev->degraded = 0;
		mddev->barriers_work = 0;
		mddev->safemode = 0;
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;

	} else if (mddev->pers)
		printk(KERN_INFO "md: %s switched to read-only mode.\n",
			mdname(mddev));
	err = 0;
	blk_integrity_unregister(disk);
	md_new_event(mddev);
	sysfs_notify_dirent(mddev->sysfs_state);
	return err;
}
#ifndef MODULE
static void autorun_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run(mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, 0);
	}
}
/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	mdk_rdev_t *rdev0, *rdev, *tmp;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
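/*
 * Illustrative userspace sketch of the ioctl serviced by get_version()
 * (assumption: the command is RAID_VERSION from <linux/raid/md_u.h>,
 * as in mainline; compile as a normal userspace program):
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_version_t ver;
	int fd = open("/dev/md0", O_RDONLY);	/* hypothetical array */
	if (fd < 0 || ioctl(fd, RAID_VERSION, &ver) < 0)
		return 1;
	printf("md driver %d.%d.%d\n", ver.major, ver.minor, ver.patchlevel);
	return 0;
}
#endif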
static int get_array_info(mddev_t * mddev, void __user * arg)
{
	mdu_array_info_t info;
	int nr,working,insync,failed,spare;
	mdk_rdev_t *rdev;

	nr=working=insync=failed=spare=0;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				insync++;
			else
				spare++;
		}
	}

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
		info.size = -1;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_offset)
		info.state = (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = insync;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_sectors << 9;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	if (md_allow_write(mddev))
		file = kmalloc(sizeof(*file), GFP_NOIO);
	else
		file = kmalloc(sizeof(*file), GFP_KERNEL);

	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}
static int get_disk_info(mddev_t *mddev, void __user *arg)
{
	mdu_disk_info_t info;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rdev = find_rdev_nr(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int add_new_disk(mddev_t *mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	dev_t dev = MKDEV(info->major, info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
							mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev, b),
					bdevname(rdev0->bdev, b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
				"%s: personality does not support diskops!\n",
				mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set save_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC) &&
			    info->raid_disk < mddev->raid_disks)
				rdev->raid_disk = info->raid_disk;
			else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		rdev->saved_raid_disk = rdev->raid_disk;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks are for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent(rdev->sysfs_state);

		md_update_sb(mddev, 1);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
		rdev->sectors = rdev->sb_start;

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
static int hot_remove_disk(mddev_t *mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
		bdevname(rdev->bdev, b), mdname(mddev));
	return -EBUSY;
}
static int hot_add_disk(mddev_t *mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	else
		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;

	rdev->sectors = rdev->sb_start;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev, b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */
	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}
static int set_bitmap_file(mddev_t *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_file = fget(fd);

		if (mddev->bitmap_file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
			return err;
		}
		mddev->bitmap_offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0)
			err = bitmap_create(mddev);
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
		}
		mddev->bitmap_file = NULL;
	}

	return err;
}
 * set_array_info is used two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout, chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->dev_sectors   = 2 * (sector_t)info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = !info->not_persistent;
	mddev->external      = 0;

	mddev->layout        = info->layout;
	mddev->chunk_sectors = info->chunk_size >> 9;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
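/*
 * Illustrative sketch (not part of the driver): the two SET_ARRAY_INFO
 * usage modes described above, as a userspace caller might exercise
 * them.  Field values are examples only.
 *
 *	mdu_array_info_t info;
 *
 *	// Mode 1: create - raid_disks > 0 shapes a new 0.90.0 array.
 *	memset(&info, 0, sizeof(info));
 *	info.level = 1;
 *	info.raid_disks = 2;
 *	info.chunk_size = 64 * 1024;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 *	// Mode 2: assemble - raid_disks == 0; only the version fields
 *	// matter, selecting the superblock handler for later
 *	// ADD_NEW_DISK calls.
 *	memset(&info, 0, sizeof(info));
 *	info.major_version = 0;
 *	info.minor_version = 90;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */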
void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
{
	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

	if (mddev->external_size)
		return;

	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(mddev_t *mddev, sector_t num_sectors)
{
	mdk_rdev_t *rdev;
	int rv;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;
	if (mddev->bitmap)
		/* Sorry, cannot grow a bitmap yet, just remove it,
		 * grow, and re-add.
		 */
		return -EBUSY;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv)
		revalidate_disk(mddev->gendisk);
	return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
	int rv;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    raid_disks >= mddev->max_disks)
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;
	mddev->delta_disks = raid_disks - mddev->raid_disks;

	rv = mddev->pers->check_reshape(mddev);
	return rv;
}
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state, ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/*	    mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent ||
	    mddev->chunk_sectors != info->chunk_size >> 9 ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state ^ info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->check_reshape == NULL)
			return -EINVAL;
		else {
			mddev->new_layout = info->layout;
			rv = mddev->pers->check_reshape(mddev);
			if (rv)
				mddev->new_layout = mddev->layout;
			return rv;
		}
	}
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->default_bitmap_offset == 0)
				return -EINVAL;
			mddev->bitmap_offset = mddev->default_bitmap_offset;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = get_capacity(mddev->gendisk) / 8;
	return 0;
}
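/*
 * Worked example of the fake geometry above: with 2 heads and 4
 * sectors per track, one cylinder covers 2 * 4 = 8 sectors, hence the
 * division by 8.  A 1 TB array (roughly 2 * 10^9 512-byte sectors)
 * therefore reports about 250 million cylinders - the "BIG number of
 * cylinders" the comment warns about.
 */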
static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	mddev_t *mddev = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd)
	{
		case RAID_VERSION:
			err = get_version(argp);
			goto done;

		case PRINT_RAID_DEBUG:
			err = 0;
			md_print_devices();
			goto done;

#ifndef MODULE
		case RAID_AUTORUN:
			err = 0;
			autostart_arrays(arg);
			goto done;
#endif
		default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto abort;
	}

	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto abort;
	}

	switch (cmd)
	{
		case SET_ARRAY_INFO:
			{
				mdu_array_info_t info;
				if (!arg)
					memset(&info, 0, sizeof(info));
				else if (copy_from_user(&info, argp, sizeof(info))) {
					err = -EFAULT;
					goto abort_unlock;
				}
				if (mddev->pers) {
					err = update_array_info(mddev, &info);
					if (err) {
						printk(KERN_WARNING "md: couldn't update"
						       " array info. %d\n", err);
						goto abort_unlock;
					}
					goto done_unlock;
				}
				if (!list_empty(&mddev->disks)) {
					printk(KERN_WARNING
					       "md: array %s already has disks!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				if (mddev->raid_disks) {
					printk(KERN_WARNING
					       "md: array %s already initialised!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				err = set_array_info(mddev, &info);
				if (err) {
					printk(KERN_WARNING "md: couldn't set"
					       " array info. %d\n", err);
					goto abort_unlock;
				}
			}
			goto done_unlock;

		default:;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd)
	{
		case GET_ARRAY_INFO:
			err = get_array_info(mddev, argp);
			goto done_unlock;

		case GET_BITMAP_FILE:
			err = get_bitmap_file(mddev, argp);
			goto done_unlock;

		case GET_DISK_INFO:
			err = get_disk_info(mddev, argp);
			goto done_unlock;

		case RESTART_ARRAY_RW:
			err = restart_array(mddev);
			goto done_unlock;

		case STOP_ARRAY:
			err = do_md_stop(mddev, 0, 1);
			goto done_unlock;

		case STOP_ARRAY_RO:
			err = do_md_stop(mddev, 1, 1);
			goto done_unlock;
	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
	 */
	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			sysfs_notify_dirent(mddev->sysfs_state);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		} else {
			err = -EROFS;
			goto abort_unlock;
		}
	}

	switch (cmd)
	{
		case ADD_NEW_DISK:
		{
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else
				err = add_new_disk(mddev, &info);
			goto done_unlock;
		}

		case HOT_REMOVE_DISK:
			err = hot_remove_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case HOT_ADD_DISK:
			err = hot_add_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case SET_DISK_FAULTY:
			err = set_disk_faulty(mddev, new_decode_dev(arg));
			goto done_unlock;

		case RUN_ARRAY:
			err = do_md_run(mddev);
			goto done_unlock;

		case SET_BITMAP_FILE:
			err = set_bitmap_file(mddev, (int)arg);
			goto done_unlock;

		default:
			err = -EINVAL;
			goto abort_unlock;
	}

done_unlock:
abort_unlock:
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
	mddev_unlock(mddev);

	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	mddev_t *mddev = mddev_find(bdev->bd_dev);
	int err;

	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_scheduled_work();
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
		goto out;

	err = 0;
	atomic_inc(&mddev->openers);
	mutex_unlock(&mddev->open_mutex);

	check_disk_change(bdev);
 out:
	return err;
}
static int md_release(struct gendisk *disk, fmode_t mode)
{
	mddev_t *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);

	return 0;
}

static int md_media_changed(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}
static const struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
	.getgeo		= md_getgeo,
	.media_changed	= md_media_changed,
	.revalidate_disk = md_revalidate,
};
static int md_thread(void *arg)
{
	mdk_thread_t *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);

		thread->run(thread->mddev);
	}

	return 0;
}
void md_wakeup_thread(mdk_thread_t *thread)
{
	if (thread) {
		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
				 const char *name)
{
	mdk_thread_t *thread;

	thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread,
				  "%s_%s",
				  mdname(thread->mddev),
				  name ?: mddev->pers->name);
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}
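/*
 * Typical use, as seen later in this file in md_check_recovery(): the
 * core registers a worker and pokes it on demand:
 *
 *	mddev->sync_thread = md_register_thread(md_do_sync, mddev, "resync");
 *	if (mddev->sync_thread)
 *		md_wakeup_thread(mddev->sync_thread);
 *
 * A NULL name falls back to the personality name, so task names come
 * out like "md0_raid1" or "md0_resync".
 */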
void md_unregister_thread(mdk_thread_t *thread)
{
	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

	kthread_stop(thread->tsk);
	kfree(thread);
}
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (mddev->external)
		set_bit(Blocked, &rdev->flags);

	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		mdname(mddev),
		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));

	if (!mddev->pers)
		return;
	if (!mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev, rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(StateChanged, &rdev->flags);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event_inintr(mddev);
}
/* seq_file implementation for /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	mdk_rdev_t *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			      bdevname(rdev->bdev, b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t *mddev)
{
	sector_t max_sectors, resync, res;
	unsigned long dt, db;
	sector_t rt;
	int scale;
	unsigned int per_milli;

	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	/*
	 * Should not happen.
	 */
	if (!max_sectors) {
		MD_BUG();
		return;
	}
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_sectors>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while (max_sectors/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_sectors>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to the 'db' value.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;

	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp, &all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp, mddev_t, all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

struct mdstat_info {
	int event;
};
static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t sectors;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro == 1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro == 2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				bdevname(rdev->bdev, b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq, " super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				"%lu%s chunk",
				bitmap->pages - bitmap->missing_pages,
				bitmap->pages,
				(bitmap->pages - bitmap->missing_pages)
					<< (PAGE_SHIFT - 10),
				chunk_kb ? chunk_kb : bitmap->chunksize,
				chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}
static const struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}
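/*
 * Illustrative sketch (not part of the driver): how userspace, e.g. a
 * monitoring daemon, can use the poll support above to sleep until the
 * array state changes.  mdstat_poll() flags POLLERR|POLLPRI whenever
 * md_event_count has advanced since the last snapshot.
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);	// returns when an md event occurs
 *	// re-read /proc/mdstat here to pick up the new state
 */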
static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
static int is_mddev_idle(mddev_t *mddev, int init)
{
	mdk_rdev_t *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
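/*
 * Worked example of the heuristic above: while a disk services only
 * resync IO, sectors[] and sync_io advance together, so curr_events
 * stays within the 64-sector slack of rdev->last_events and the array
 * counts as idle.  If an application then reads 1 MB (2048 sectors)
 * from the disk, sectors[] grows without sync_io, curr_events jumps by
 * 2048 > 64, last_events is resynchronised and the function reports
 * "not idle", which throttles the resync to speed_limit_min.
 */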
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors, j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	mdk_rdev_t *rdev;
	char *desc;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->dev_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
		" %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
		window/2, (unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j > 2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			blk_unplug(mddev->queue);
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed =
				mddev->curr_resync;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max && !kthread_should_stop()) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || kthread_should_stop());
		}

		if (kthread_should_stop())
			goto interrupted;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						  currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j > 1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop())
			goto interrupted;


		/*
		 * this loop exits only if either when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n", mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	mddev->curr_resync = 0;
	mddev->curr_resync_completed = 0;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int spares = 0;

	mddev->curr_resync_completed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     !test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending) == 0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk) == 0) {
				char nm[20];
				sprintf(nm, "rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded && !mddev->ro && !mddev->recovery_disabled) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Blocked, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
		}
	}
	return spares;
}
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && !atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			remove_and_add_spares(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				if (mddev->persistent)
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (test_and_clear_bit(StateChanged, &rdev->flags))
				sysfs_notify_dirent(rdev->sysfs_state);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				if (mddev->pers->spare_active(mddev))
					sysfs_notify(&mddev->kobj, NULL,
						     "degraded");
			}
			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    mddev->pers->finish_reshape)
				mddev->pers->finish_reshape(mddev);
			md_update_sb(mddev, 1);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				list_for_each_entry(rdev, &mddev->disks, same_set)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares && mddev->bitmap && !mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n",
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify_dirent(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				do_md_stop(mddev, 1, 100);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
	if (register_blkdev(MD_MAJOR, "md"))
		return -1;
	if ((mdp_major = register_blkdev(0, "mdp")) <= 0) {
		unregister_blkdev(MD_MAJOR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
						i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR, 0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
}

subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
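/*
 * Illustrative usage of the parameters above (values are examples, and
 * the exact name accepted by new_array depends on add_named_array):
 *
 *	modprobe md-mod start_ro=1 start_dirty_degraded=0
 *	echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * start_ro=1 makes arrays start in the 'auto-read-only' state shown in
 * /proc/mdstat until the first write arrives; writing a name to
 * new_array pre-creates a named array node via add_named_array().
 */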
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);