2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13 - kmod support by: Cyrus Durgin
14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 - lots of fixes and improvements to the RAID1/RAID5 and generic
18 RAID code (such as request based resynchronization):
20 Neil Brown <neilb@cse.unsw.edu.au>.
22 - persistent bitmap code
23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25 This program is free software; you can redistribute it and/or modify
26 it under the terms of the GNU General Public License as published by
27 the Free Software Foundation; either version 2, or (at your option)
30 You should have received a copy of the GNU General Public License
31 (for example /usr/src/linux/COPYING); if not, write to the Free
32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/sysctl.h>
38 #include <linux/seq_file.h>
39 #include <linux/buffer_head.h> /* for invalidate_bdev */
40 #include <linux/poll.h>
41 #include <linux/ctype.h>
42 #include <linux/hdreg.h>
43 #include <linux/proc_fs.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/file.h>
47 #include <linux/delay.h>
48 #include <linux/raid/md_p.h>
49 #include <linux/raid/md_u.h>
54 #define dprintk(x...) ((void)(DEBUG && printk(x)))
58 static void autostart_arrays(int part
);
61 static LIST_HEAD(pers_list
);
62 static DEFINE_SPINLOCK(pers_lock
);
64 static void md_print_devices(void);
66 static DECLARE_WAIT_QUEUE_HEAD(resync_wait
);
68 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
71 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
72 * is 1000 KB/sec, so the extra system load does not show up that much.
73 * Increase it if you want to have more _guaranteed_ speed. Note that
74 * the RAID driver will use the maximum available bandwidth if the IO
75 * subsystem is idle. There is also an 'absolute maximum' reconstruction
76 * speed limit - in case reconstruction slows down your system despite
79 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
80 * or /sys/block/mdX/md/sync_speed_{min,max}
83 static int sysctl_speed_limit_min
= 1000;
84 static int sysctl_speed_limit_max
= 200000;
85 static inline int speed_min(mddev_t
*mddev
)
87 return mddev
->sync_speed_min
?
88 mddev
->sync_speed_min
: sysctl_speed_limit_min
;
91 static inline int speed_max(mddev_t
*mddev
)
93 return mddev
->sync_speed_max
?
94 mddev
->sync_speed_max
: sysctl_speed_limit_max
;
97 static struct ctl_table_header
*raid_table_header
;
99 static ctl_table raid_table
[] = {
101 .ctl_name
= DEV_RAID_SPEED_LIMIT_MIN
,
102 .procname
= "speed_limit_min",
103 .data
= &sysctl_speed_limit_min
,
104 .maxlen
= sizeof(int),
105 .mode
= S_IRUGO
|S_IWUSR
,
106 .proc_handler
= &proc_dointvec
,
109 .ctl_name
= DEV_RAID_SPEED_LIMIT_MAX
,
110 .procname
= "speed_limit_max",
111 .data
= &sysctl_speed_limit_max
,
112 .maxlen
= sizeof(int),
113 .mode
= S_IRUGO
|S_IWUSR
,
114 .proc_handler
= &proc_dointvec
,
119 static ctl_table raid_dir_table
[] = {
121 .ctl_name
= DEV_RAID
,
124 .mode
= S_IRUGO
|S_IXUGO
,
130 static ctl_table raid_root_table
[] = {
136 .child
= raid_dir_table
,
141 static struct block_device_operations md_fops
;
143 static int start_readonly
;
146 * We have a system wide 'event count' that is incremented
147 * on any 'interesting' event, and readers of /proc/mdstat
148 * can use 'poll' or 'select' to find out when the event
152 * start array, stop array, error, add device, remove device,
153 * start build, activate spare
155 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters
);
156 static atomic_t md_event_count
;
157 void md_new_event(mddev_t
*mddev
)
159 atomic_inc(&md_event_count
);
160 wake_up(&md_event_waiters
);
162 EXPORT_SYMBOL_GPL(md_new_event
);
164 /* Alternate version that can be called from interrupts
165 * when calling sysfs_notify isn't needed.
167 static void md_new_event_inintr(mddev_t
*mddev
)
169 atomic_inc(&md_event_count
);
170 wake_up(&md_event_waiters
);
174 * Enables to iterate over all existing md arrays
175 * all_mddevs_lock protects this list.
177 static LIST_HEAD(all_mddevs
);
178 static DEFINE_SPINLOCK(all_mddevs_lock
);
182 * iterates through all used mddevs in the system.
183 * We take care to grab the all_mddevs_lock whenever navigating
184 * the list, and to always hold a refcount when unlocked.
185 * Any code which breaks out of this loop while own
186 * a reference to the current mddev and must mddev_put it.
188 #define for_each_mddev(mddev,tmp) \
190 for (({ spin_lock(&all_mddevs_lock); \
191 tmp = all_mddevs.next; \
193 ({ if (tmp != &all_mddevs) \
194 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
195 spin_unlock(&all_mddevs_lock); \
196 if (mddev) mddev_put(mddev); \
197 mddev = list_entry(tmp, mddev_t, all_mddevs); \
198 tmp != &all_mddevs;}); \
199 ({ spin_lock(&all_mddevs_lock); \
204 /* Rather than calling directly into the personality make_request function,
205 * IO requests come here first so that we can check if the device is
206 * being suspended pending a reconfiguration.
207 * We hold a refcount over the call to ->make_request. By the time that
208 * call has finished, the bio has been linked into some internal structure
209 * and so is visible to ->quiesce(), so we don't need the refcount any more.
211 static int md_make_request(struct request_queue
*q
, struct bio
*bio
)
213 mddev_t
*mddev
= q
->queuedata
;
215 if (mddev
== NULL
|| mddev
->pers
== NULL
) {
220 if (mddev
->suspended
) {
223 prepare_to_wait(&mddev
->sb_wait
, &__wait
,
224 TASK_UNINTERRUPTIBLE
);
225 if (!mddev
->suspended
)
231 finish_wait(&mddev
->sb_wait
, &__wait
);
233 atomic_inc(&mddev
->active_io
);
235 rv
= mddev
->pers
->make_request(q
, bio
);
236 if (atomic_dec_and_test(&mddev
->active_io
) && mddev
->suspended
)
237 wake_up(&mddev
->sb_wait
);
242 static void mddev_suspend(mddev_t
*mddev
)
244 BUG_ON(mddev
->suspended
);
245 mddev
->suspended
= 1;
247 wait_event(mddev
->sb_wait
, atomic_read(&mddev
->active_io
) == 0);
248 mddev
->pers
->quiesce(mddev
, 1);
249 md_unregister_thread(mddev
->thread
);
250 mddev
->thread
= NULL
;
251 /* we now know that no code is executing in the personality module,
252 * except possibly the tail end of a ->bi_end_io function, but that
253 * is certain to complete before the module has a chance to get
258 static void mddev_resume(mddev_t
*mddev
)
260 mddev
->suspended
= 0;
261 wake_up(&mddev
->sb_wait
);
262 mddev
->pers
->quiesce(mddev
, 0);
266 static inline mddev_t
*mddev_get(mddev_t
*mddev
)
268 atomic_inc(&mddev
->active
);
272 static void mddev_delayed_delete(struct work_struct
*ws
);
274 static void mddev_put(mddev_t
*mddev
)
276 if (!atomic_dec_and_lock(&mddev
->active
, &all_mddevs_lock
))
278 if (!mddev
->raid_disks
&& list_empty(&mddev
->disks
) &&
279 !mddev
->hold_active
) {
280 list_del(&mddev
->all_mddevs
);
281 if (mddev
->gendisk
) {
282 /* we did a probe so need to clean up.
283 * Call schedule_work inside the spinlock
284 * so that flush_scheduled_work() after
285 * mddev_find will succeed in waiting for the
288 INIT_WORK(&mddev
->del_work
, mddev_delayed_delete
);
289 schedule_work(&mddev
->del_work
);
293 spin_unlock(&all_mddevs_lock
);
296 static mddev_t
* mddev_find(dev_t unit
)
298 mddev_t
*mddev
, *new = NULL
;
301 spin_lock(&all_mddevs_lock
);
304 list_for_each_entry(mddev
, &all_mddevs
, all_mddevs
)
305 if (mddev
->unit
== unit
) {
307 spin_unlock(&all_mddevs_lock
);
313 list_add(&new->all_mddevs
, &all_mddevs
);
314 spin_unlock(&all_mddevs_lock
);
315 new->hold_active
= UNTIL_IOCTL
;
319 /* find an unused unit number */
320 static int next_minor
= 512;
321 int start
= next_minor
;
325 dev
= MKDEV(MD_MAJOR
, next_minor
);
327 if (next_minor
> MINORMASK
)
329 if (next_minor
== start
) {
330 /* Oh dear, all in use. */
331 spin_unlock(&all_mddevs_lock
);
337 list_for_each_entry(mddev
, &all_mddevs
, all_mddevs
)
338 if (mddev
->unit
== dev
) {
344 new->md_minor
= MINOR(dev
);
345 new->hold_active
= UNTIL_STOP
;
346 list_add(&new->all_mddevs
, &all_mddevs
);
347 spin_unlock(&all_mddevs_lock
);
350 spin_unlock(&all_mddevs_lock
);
352 new = kzalloc(sizeof(*new), GFP_KERNEL
);
357 if (MAJOR(unit
) == MD_MAJOR
)
358 new->md_minor
= MINOR(unit
);
360 new->md_minor
= MINOR(unit
) >> MdpMinorShift
;
362 mutex_init(&new->reconfig_mutex
);
363 INIT_LIST_HEAD(&new->disks
);
364 INIT_LIST_HEAD(&new->all_mddevs
);
365 init_timer(&new->safemode_timer
);
366 atomic_set(&new->active
, 1);
367 atomic_set(&new->openers
, 0);
368 atomic_set(&new->active_io
, 0);
369 spin_lock_init(&new->write_lock
);
370 init_waitqueue_head(&new->sb_wait
);
371 init_waitqueue_head(&new->recovery_wait
);
372 new->reshape_position
= MaxSector
;
374 new->resync_max
= MaxSector
;
375 new->level
= LEVEL_NONE
;
380 static inline int mddev_lock(mddev_t
* mddev
)
382 return mutex_lock_interruptible(&mddev
->reconfig_mutex
);
385 static inline int mddev_is_locked(mddev_t
*mddev
)
387 return mutex_is_locked(&mddev
->reconfig_mutex
);
390 static inline int mddev_trylock(mddev_t
* mddev
)
392 return mutex_trylock(&mddev
->reconfig_mutex
);
395 static inline void mddev_unlock(mddev_t
* mddev
)
397 mutex_unlock(&mddev
->reconfig_mutex
);
399 md_wakeup_thread(mddev
->thread
);
402 static mdk_rdev_t
* find_rdev_nr(mddev_t
*mddev
, int nr
)
406 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
407 if (rdev
->desc_nr
== nr
)
413 static mdk_rdev_t
* find_rdev(mddev_t
* mddev
, dev_t dev
)
417 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
418 if (rdev
->bdev
->bd_dev
== dev
)
424 static struct mdk_personality
*find_pers(int level
, char *clevel
)
426 struct mdk_personality
*pers
;
427 list_for_each_entry(pers
, &pers_list
, list
) {
428 if (level
!= LEVEL_NONE
&& pers
->level
== level
)
430 if (strcmp(pers
->name
, clevel
)==0)
436 /* return the offset of the super block in 512byte sectors */
437 static inline sector_t
calc_dev_sboffset(struct block_device
*bdev
)
439 sector_t num_sectors
= bdev
->bd_inode
->i_size
/ 512;
440 return MD_NEW_SIZE_SECTORS(num_sectors
);
443 static sector_t
calc_num_sectors(mdk_rdev_t
*rdev
, unsigned chunk_size
)
445 sector_t num_sectors
= rdev
->sb_start
;
448 unsigned chunk_sects
= chunk_size
>>9;
449 sector_div(num_sectors
, chunk_sects
);
450 num_sectors
*= chunk_sects
;
455 static int alloc_disk_sb(mdk_rdev_t
* rdev
)
460 rdev
->sb_page
= alloc_page(GFP_KERNEL
);
461 if (!rdev
->sb_page
) {
462 printk(KERN_ALERT
"md: out of memory.\n");
469 static void free_disk_sb(mdk_rdev_t
* rdev
)
472 put_page(rdev
->sb_page
);
474 rdev
->sb_page
= NULL
;
481 static void super_written(struct bio
*bio
, int error
)
483 mdk_rdev_t
*rdev
= bio
->bi_private
;
484 mddev_t
*mddev
= rdev
->mddev
;
486 if (error
|| !test_bit(BIO_UPTODATE
, &bio
->bi_flags
)) {
487 printk("md: super_written gets error=%d, uptodate=%d\n",
488 error
, test_bit(BIO_UPTODATE
, &bio
->bi_flags
));
489 WARN_ON(test_bit(BIO_UPTODATE
, &bio
->bi_flags
));
490 md_error(mddev
, rdev
);
493 if (atomic_dec_and_test(&mddev
->pending_writes
))
494 wake_up(&mddev
->sb_wait
);
498 static void super_written_barrier(struct bio
*bio
, int error
)
500 struct bio
*bio2
= bio
->bi_private
;
501 mdk_rdev_t
*rdev
= bio2
->bi_private
;
502 mddev_t
*mddev
= rdev
->mddev
;
504 if (!test_bit(BIO_UPTODATE
, &bio
->bi_flags
) &&
505 error
== -EOPNOTSUPP
) {
507 /* barriers don't appear to be supported :-( */
508 set_bit(BarriersNotsupp
, &rdev
->flags
);
509 mddev
->barriers_work
= 0;
510 spin_lock_irqsave(&mddev
->write_lock
, flags
);
511 bio2
->bi_next
= mddev
->biolist
;
512 mddev
->biolist
= bio2
;
513 spin_unlock_irqrestore(&mddev
->write_lock
, flags
);
514 wake_up(&mddev
->sb_wait
);
518 bio
->bi_private
= rdev
;
519 super_written(bio
, error
);
523 void md_super_write(mddev_t
*mddev
, mdk_rdev_t
*rdev
,
524 sector_t sector
, int size
, struct page
*page
)
526 /* write first size bytes of page to sector of rdev
527 * Increment mddev->pending_writes before returning
528 * and decrement it on completion, waking up sb_wait
529 * if zero is reached.
530 * If an error occurred, call md_error
532 * As we might need to resubmit the request if BIO_RW_BARRIER
533 * causes ENOTSUPP, we allocate a spare bio...
535 struct bio
*bio
= bio_alloc(GFP_NOIO
, 1);
536 int rw
= (1<<BIO_RW
) | (1<<BIO_RW_SYNCIO
) | (1<<BIO_RW_UNPLUG
);
538 bio
->bi_bdev
= rdev
->bdev
;
539 bio
->bi_sector
= sector
;
540 bio_add_page(bio
, page
, size
, 0);
541 bio
->bi_private
= rdev
;
542 bio
->bi_end_io
= super_written
;
545 atomic_inc(&mddev
->pending_writes
);
546 if (!test_bit(BarriersNotsupp
, &rdev
->flags
)) {
548 rw
|= (1<<BIO_RW_BARRIER
);
549 rbio
= bio_clone(bio
, GFP_NOIO
);
550 rbio
->bi_private
= bio
;
551 rbio
->bi_end_io
= super_written_barrier
;
552 submit_bio(rw
, rbio
);
557 void md_super_wait(mddev_t
*mddev
)
559 /* wait for all superblock writes that were scheduled to complete.
560 * if any had to be retried (due to BARRIER problems), retry them
564 prepare_to_wait(&mddev
->sb_wait
, &wq
, TASK_UNINTERRUPTIBLE
);
565 if (atomic_read(&mddev
->pending_writes
)==0)
567 while (mddev
->biolist
) {
569 spin_lock_irq(&mddev
->write_lock
);
570 bio
= mddev
->biolist
;
571 mddev
->biolist
= bio
->bi_next
;
573 spin_unlock_irq(&mddev
->write_lock
);
574 submit_bio(bio
->bi_rw
, bio
);
578 finish_wait(&mddev
->sb_wait
, &wq
);
581 static void bi_complete(struct bio
*bio
, int error
)
583 complete((struct completion
*)bio
->bi_private
);
586 int sync_page_io(struct block_device
*bdev
, sector_t sector
, int size
,
587 struct page
*page
, int rw
)
589 struct bio
*bio
= bio_alloc(GFP_NOIO
, 1);
590 struct completion event
;
593 rw
|= (1 << BIO_RW_SYNCIO
) | (1 << BIO_RW_UNPLUG
);
596 bio
->bi_sector
= sector
;
597 bio_add_page(bio
, page
, size
, 0);
598 init_completion(&event
);
599 bio
->bi_private
= &event
;
600 bio
->bi_end_io
= bi_complete
;
602 wait_for_completion(&event
);
604 ret
= test_bit(BIO_UPTODATE
, &bio
->bi_flags
);
608 EXPORT_SYMBOL_GPL(sync_page_io
);
610 static int read_disk_sb(mdk_rdev_t
* rdev
, int size
)
612 char b
[BDEVNAME_SIZE
];
613 if (!rdev
->sb_page
) {
621 if (!sync_page_io(rdev
->bdev
, rdev
->sb_start
, size
, rdev
->sb_page
, READ
))
627 printk(KERN_WARNING
"md: disabled device %s, could not read superblock.\n",
628 bdevname(rdev
->bdev
,b
));
632 static int uuid_equal(mdp_super_t
*sb1
, mdp_super_t
*sb2
)
634 return sb1
->set_uuid0
== sb2
->set_uuid0
&&
635 sb1
->set_uuid1
== sb2
->set_uuid1
&&
636 sb1
->set_uuid2
== sb2
->set_uuid2
&&
637 sb1
->set_uuid3
== sb2
->set_uuid3
;
640 static int sb_equal(mdp_super_t
*sb1
, mdp_super_t
*sb2
)
643 mdp_super_t
*tmp1
, *tmp2
;
645 tmp1
= kmalloc(sizeof(*tmp1
),GFP_KERNEL
);
646 tmp2
= kmalloc(sizeof(*tmp2
),GFP_KERNEL
);
648 if (!tmp1
|| !tmp2
) {
650 printk(KERN_INFO
"md.c sb_equal(): failed to allocate memory!\n");
658 * nr_disks is not constant
663 ret
= (memcmp(tmp1
, tmp2
, MD_SB_GENERIC_CONSTANT_WORDS
* 4) == 0);
671 static u32
md_csum_fold(u32 csum
)
673 csum
= (csum
& 0xffff) + (csum
>> 16);
674 return (csum
& 0xffff) + (csum
>> 16);
677 static unsigned int calc_sb_csum(mdp_super_t
* sb
)
680 u32
*sb32
= (u32
*)sb
;
682 unsigned int disk_csum
, csum
;
684 disk_csum
= sb
->sb_csum
;
687 for (i
= 0; i
< MD_SB_BYTES
/4 ; i
++)
689 csum
= (newcsum
& 0xffffffff) + (newcsum
>>32);
693 /* This used to use csum_partial, which was wrong for several
694 * reasons including that different results are returned on
695 * different architectures. It isn't critical that we get exactly
696 * the same return value as before (we always csum_fold before
697 * testing, and that removes any differences). However as we
698 * know that csum_partial always returned a 16bit value on
699 * alphas, do a fold to maximise conformity to previous behaviour.
701 sb
->sb_csum
= md_csum_fold(disk_csum
);
703 sb
->sb_csum
= disk_csum
;
710 * Handle superblock details.
711 * We want to be able to handle multiple superblock formats
712 * so we have a common interface to them all, and an array of
713 * different handlers.
714 * We rely on user-space to write the initial superblock, and support
715 * reading and updating of superblocks.
716 * Interface methods are:
717 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
718 * loads and validates a superblock on dev.
719 * if refdev != NULL, compare superblocks on both devices
721 * 0 - dev has a superblock that is compatible with refdev
722 * 1 - dev has a superblock that is compatible and newer than refdev
723 * so dev should be used as the refdev in future
724 * -EINVAL superblock incompatible or invalid
725 * -othererror e.g. -EIO
727 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
728 * Verify that dev is acceptable into mddev.
729 * The first time, mddev->raid_disks will be 0, and data from
730 * dev should be merged in. Subsequent calls check that dev
731 * is new enough. Return 0 or -EINVAL
733 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
734 * Update the superblock for rdev with data in mddev
735 * This does not write to disc.
741 struct module
*owner
;
742 int (*load_super
)(mdk_rdev_t
*rdev
, mdk_rdev_t
*refdev
,
744 int (*validate_super
)(mddev_t
*mddev
, mdk_rdev_t
*rdev
);
745 void (*sync_super
)(mddev_t
*mddev
, mdk_rdev_t
*rdev
);
746 unsigned long long (*rdev_size_change
)(mdk_rdev_t
*rdev
,
747 sector_t num_sectors
);
751 * load_super for 0.90.0
753 static int super_90_load(mdk_rdev_t
*rdev
, mdk_rdev_t
*refdev
, int minor_version
)
755 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
760 * Calculate the position of the superblock (512byte sectors),
761 * it's at the end of the disk.
763 * It also happens to be a multiple of 4Kb.
765 rdev
->sb_start
= calc_dev_sboffset(rdev
->bdev
);
767 ret
= read_disk_sb(rdev
, MD_SB_BYTES
);
772 bdevname(rdev
->bdev
, b
);
773 sb
= (mdp_super_t
*)page_address(rdev
->sb_page
);
775 if (sb
->md_magic
!= MD_SB_MAGIC
) {
776 printk(KERN_ERR
"md: invalid raid superblock magic on %s\n",
781 if (sb
->major_version
!= 0 ||
782 sb
->minor_version
< 90 ||
783 sb
->minor_version
> 91) {
784 printk(KERN_WARNING
"Bad version number %d.%d on %s\n",
785 sb
->major_version
, sb
->minor_version
,
790 if (sb
->raid_disks
<= 0)
793 if (md_csum_fold(calc_sb_csum(sb
)) != md_csum_fold(sb
->sb_csum
)) {
794 printk(KERN_WARNING
"md: invalid superblock checksum on %s\n",
799 rdev
->preferred_minor
= sb
->md_minor
;
800 rdev
->data_offset
= 0;
801 rdev
->sb_size
= MD_SB_BYTES
;
803 if (sb
->state
& (1<<MD_SB_BITMAP_PRESENT
)) {
804 if (sb
->level
!= 1 && sb
->level
!= 4
805 && sb
->level
!= 5 && sb
->level
!= 6
806 && sb
->level
!= 10) {
807 /* FIXME use a better test */
809 "md: bitmaps not supported for this level.\n");
814 if (sb
->level
== LEVEL_MULTIPATH
)
817 rdev
->desc_nr
= sb
->this_disk
.number
;
823 mdp_super_t
*refsb
= (mdp_super_t
*)page_address(refdev
->sb_page
);
824 if (!uuid_equal(refsb
, sb
)) {
825 printk(KERN_WARNING
"md: %s has different UUID to %s\n",
826 b
, bdevname(refdev
->bdev
,b2
));
829 if (!sb_equal(refsb
, sb
)) {
830 printk(KERN_WARNING
"md: %s has same UUID"
831 " but different superblock to %s\n",
832 b
, bdevname(refdev
->bdev
, b2
));
836 ev2
= md_event(refsb
);
842 rdev
->sectors
= calc_num_sectors(rdev
, sb
->chunk_size
);
844 if (rdev
->sectors
< sb
->size
* 2 && sb
->level
> 1)
845 /* "this cannot possibly happen" ... */
853 * validate_super for 0.90.0
855 static int super_90_validate(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
858 mdp_super_t
*sb
= (mdp_super_t
*)page_address(rdev
->sb_page
);
859 __u64 ev1
= md_event(sb
);
861 rdev
->raid_disk
= -1;
862 clear_bit(Faulty
, &rdev
->flags
);
863 clear_bit(In_sync
, &rdev
->flags
);
864 clear_bit(WriteMostly
, &rdev
->flags
);
865 clear_bit(BarriersNotsupp
, &rdev
->flags
);
867 if (mddev
->raid_disks
== 0) {
868 mddev
->major_version
= 0;
869 mddev
->minor_version
= sb
->minor_version
;
870 mddev
->patch_version
= sb
->patch_version
;
872 mddev
->chunk_sectors
= sb
->chunk_size
>> 9;
873 mddev
->ctime
= sb
->ctime
;
874 mddev
->utime
= sb
->utime
;
875 mddev
->level
= sb
->level
;
876 mddev
->clevel
[0] = 0;
877 mddev
->layout
= sb
->layout
;
878 mddev
->raid_disks
= sb
->raid_disks
;
879 mddev
->dev_sectors
= sb
->size
* 2;
881 mddev
->bitmap_offset
= 0;
882 mddev
->default_bitmap_offset
= MD_SB_BYTES
>> 9;
884 if (mddev
->minor_version
>= 91) {
885 mddev
->reshape_position
= sb
->reshape_position
;
886 mddev
->delta_disks
= sb
->delta_disks
;
887 mddev
->new_level
= sb
->new_level
;
888 mddev
->new_layout
= sb
->new_layout
;
889 mddev
->new_chunk_sectors
= sb
->new_chunk
>> 9;
891 mddev
->reshape_position
= MaxSector
;
892 mddev
->delta_disks
= 0;
893 mddev
->new_level
= mddev
->level
;
894 mddev
->new_layout
= mddev
->layout
;
895 mddev
->new_chunk_sectors
= mddev
->chunk_sectors
;
898 if (sb
->state
& (1<<MD_SB_CLEAN
))
899 mddev
->recovery_cp
= MaxSector
;
901 if (sb
->events_hi
== sb
->cp_events_hi
&&
902 sb
->events_lo
== sb
->cp_events_lo
) {
903 mddev
->recovery_cp
= sb
->recovery_cp
;
905 mddev
->recovery_cp
= 0;
908 memcpy(mddev
->uuid
+0, &sb
->set_uuid0
, 4);
909 memcpy(mddev
->uuid
+4, &sb
->set_uuid1
, 4);
910 memcpy(mddev
->uuid
+8, &sb
->set_uuid2
, 4);
911 memcpy(mddev
->uuid
+12,&sb
->set_uuid3
, 4);
913 mddev
->max_disks
= MD_SB_DISKS
;
915 if (sb
->state
& (1<<MD_SB_BITMAP_PRESENT
) &&
916 mddev
->bitmap_file
== NULL
)
917 mddev
->bitmap_offset
= mddev
->default_bitmap_offset
;
919 } else if (mddev
->pers
== NULL
) {
920 /* Insist on good event counter while assembling */
922 if (ev1
< mddev
->events
)
924 } else if (mddev
->bitmap
) {
925 /* if adding to array with a bitmap, then we can accept an
926 * older device ... but not too old.
928 if (ev1
< mddev
->bitmap
->events_cleared
)
931 if (ev1
< mddev
->events
)
932 /* just a hot-add of a new device, leave raid_disk at -1 */
936 if (mddev
->level
!= LEVEL_MULTIPATH
) {
937 desc
= sb
->disks
+ rdev
->desc_nr
;
939 if (desc
->state
& (1<<MD_DISK_FAULTY
))
940 set_bit(Faulty
, &rdev
->flags
);
941 else if (desc
->state
& (1<<MD_DISK_SYNC
) /* &&
942 desc->raid_disk < mddev->raid_disks */) {
943 set_bit(In_sync
, &rdev
->flags
);
944 rdev
->raid_disk
= desc
->raid_disk
;
946 if (desc
->state
& (1<<MD_DISK_WRITEMOSTLY
))
947 set_bit(WriteMostly
, &rdev
->flags
);
948 } else /* MULTIPATH are always insync */
949 set_bit(In_sync
, &rdev
->flags
);
954 * sync_super for 0.90.0
956 static void super_90_sync(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
960 int next_spare
= mddev
->raid_disks
;
963 /* make rdev->sb match mddev data..
966 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
967 * 3/ any empty disks < next_spare become removed
969 * disks[0] gets initialised to REMOVED because
970 * we cannot be sure from other fields if it has
971 * been initialised or not.
974 int active
=0, working
=0,failed
=0,spare
=0,nr_disks
=0;
976 rdev
->sb_size
= MD_SB_BYTES
;
978 sb
= (mdp_super_t
*)page_address(rdev
->sb_page
);
980 memset(sb
, 0, sizeof(*sb
));
982 sb
->md_magic
= MD_SB_MAGIC
;
983 sb
->major_version
= mddev
->major_version
;
984 sb
->patch_version
= mddev
->patch_version
;
985 sb
->gvalid_words
= 0; /* ignored */
986 memcpy(&sb
->set_uuid0
, mddev
->uuid
+0, 4);
987 memcpy(&sb
->set_uuid1
, mddev
->uuid
+4, 4);
988 memcpy(&sb
->set_uuid2
, mddev
->uuid
+8, 4);
989 memcpy(&sb
->set_uuid3
, mddev
->uuid
+12,4);
991 sb
->ctime
= mddev
->ctime
;
992 sb
->level
= mddev
->level
;
993 sb
->size
= mddev
->dev_sectors
/ 2;
994 sb
->raid_disks
= mddev
->raid_disks
;
995 sb
->md_minor
= mddev
->md_minor
;
996 sb
->not_persistent
= 0;
997 sb
->utime
= mddev
->utime
;
999 sb
->events_hi
= (mddev
->events
>>32);
1000 sb
->events_lo
= (u32
)mddev
->events
;
1002 if (mddev
->reshape_position
== MaxSector
)
1003 sb
->minor_version
= 90;
1005 sb
->minor_version
= 91;
1006 sb
->reshape_position
= mddev
->reshape_position
;
1007 sb
->new_level
= mddev
->new_level
;
1008 sb
->delta_disks
= mddev
->delta_disks
;
1009 sb
->new_layout
= mddev
->new_layout
;
1010 sb
->new_chunk
= mddev
->new_chunk_sectors
<< 9;
1012 mddev
->minor_version
= sb
->minor_version
;
1015 sb
->recovery_cp
= mddev
->recovery_cp
;
1016 sb
->cp_events_hi
= (mddev
->events
>>32);
1017 sb
->cp_events_lo
= (u32
)mddev
->events
;
1018 if (mddev
->recovery_cp
== MaxSector
)
1019 sb
->state
= (1<< MD_SB_CLEAN
);
1021 sb
->recovery_cp
= 0;
1023 sb
->layout
= mddev
->layout
;
1024 sb
->chunk_size
= mddev
->chunk_sectors
<< 9;
1026 if (mddev
->bitmap
&& mddev
->bitmap_file
== NULL
)
1027 sb
->state
|= (1<<MD_SB_BITMAP_PRESENT
);
1029 sb
->disks
[0].state
= (1<<MD_DISK_REMOVED
);
1030 list_for_each_entry(rdev2
, &mddev
->disks
, same_set
) {
1033 if (rdev2
->raid_disk
>= 0 && test_bit(In_sync
, &rdev2
->flags
)
1034 && !test_bit(Faulty
, &rdev2
->flags
))
1035 desc_nr
= rdev2
->raid_disk
;
1037 desc_nr
= next_spare
++;
1038 rdev2
->desc_nr
= desc_nr
;
1039 d
= &sb
->disks
[rdev2
->desc_nr
];
1041 d
->number
= rdev2
->desc_nr
;
1042 d
->major
= MAJOR(rdev2
->bdev
->bd_dev
);
1043 d
->minor
= MINOR(rdev2
->bdev
->bd_dev
);
1044 if (rdev2
->raid_disk
>= 0 && test_bit(In_sync
, &rdev2
->flags
)
1045 && !test_bit(Faulty
, &rdev2
->flags
))
1046 d
->raid_disk
= rdev2
->raid_disk
;
1048 d
->raid_disk
= rdev2
->desc_nr
; /* compatibility */
1049 if (test_bit(Faulty
, &rdev2
->flags
))
1050 d
->state
= (1<<MD_DISK_FAULTY
);
1051 else if (test_bit(In_sync
, &rdev2
->flags
)) {
1052 d
->state
= (1<<MD_DISK_ACTIVE
);
1053 d
->state
|= (1<<MD_DISK_SYNC
);
1061 if (test_bit(WriteMostly
, &rdev2
->flags
))
1062 d
->state
|= (1<<MD_DISK_WRITEMOSTLY
);
1064 /* now set the "removed" and "faulty" bits on any missing devices */
1065 for (i
=0 ; i
< mddev
->raid_disks
; i
++) {
1066 mdp_disk_t
*d
= &sb
->disks
[i
];
1067 if (d
->state
== 0 && d
->number
== 0) {
1070 d
->state
= (1<<MD_DISK_REMOVED
);
1071 d
->state
|= (1<<MD_DISK_FAULTY
);
1075 sb
->nr_disks
= nr_disks
;
1076 sb
->active_disks
= active
;
1077 sb
->working_disks
= working
;
1078 sb
->failed_disks
= failed
;
1079 sb
->spare_disks
= spare
;
1081 sb
->this_disk
= sb
->disks
[rdev
->desc_nr
];
1082 sb
->sb_csum
= calc_sb_csum(sb
);
1086 * rdev_size_change for 0.90.0
1088 static unsigned long long
1089 super_90_rdev_size_change(mdk_rdev_t
*rdev
, sector_t num_sectors
)
1091 if (num_sectors
&& num_sectors
< rdev
->mddev
->dev_sectors
)
1092 return 0; /* component must fit device */
1093 if (rdev
->mddev
->bitmap_offset
)
1094 return 0; /* can't move bitmap */
1095 rdev
->sb_start
= calc_dev_sboffset(rdev
->bdev
);
1096 if (!num_sectors
|| num_sectors
> rdev
->sb_start
)
1097 num_sectors
= rdev
->sb_start
;
1098 md_super_write(rdev
->mddev
, rdev
, rdev
->sb_start
, rdev
->sb_size
,
1100 md_super_wait(rdev
->mddev
);
1101 return num_sectors
/ 2; /* kB for sysfs */
1106 * version 1 superblock
1109 static __le32
calc_sb_1_csum(struct mdp_superblock_1
* sb
)
1113 unsigned long long newcsum
;
1114 int size
= 256 + le32_to_cpu(sb
->max_dev
)*2;
1115 __le32
*isuper
= (__le32
*)sb
;
1118 disk_csum
= sb
->sb_csum
;
1121 for (i
=0; size
>=4; size
-= 4 )
1122 newcsum
+= le32_to_cpu(*isuper
++);
1125 newcsum
+= le16_to_cpu(*(__le16
*) isuper
);
1127 csum
= (newcsum
& 0xffffffff) + (newcsum
>> 32);
1128 sb
->sb_csum
= disk_csum
;
1129 return cpu_to_le32(csum
);
1132 static int super_1_load(mdk_rdev_t
*rdev
, mdk_rdev_t
*refdev
, int minor_version
)
1134 struct mdp_superblock_1
*sb
;
1137 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
1141 * Calculate the position of the superblock in 512byte sectors.
1142 * It is always aligned to a 4K boundary and
1143 * depeding on minor_version, it can be:
1144 * 0: At least 8K, but less than 12K, from end of device
1145 * 1: At start of device
1146 * 2: 4K from start of device.
1148 switch(minor_version
) {
1150 sb_start
= rdev
->bdev
->bd_inode
->i_size
>> 9;
1152 sb_start
&= ~(sector_t
)(4*2-1);
1163 rdev
->sb_start
= sb_start
;
1165 /* superblock is rarely larger than 1K, but it can be larger,
1166 * and it is safe to read 4k, so we do that
1168 ret
= read_disk_sb(rdev
, 4096);
1169 if (ret
) return ret
;
1172 sb
= (struct mdp_superblock_1
*)page_address(rdev
->sb_page
);
1174 if (sb
->magic
!= cpu_to_le32(MD_SB_MAGIC
) ||
1175 sb
->major_version
!= cpu_to_le32(1) ||
1176 le32_to_cpu(sb
->max_dev
) > (4096-256)/2 ||
1177 le64_to_cpu(sb
->super_offset
) != rdev
->sb_start
||
1178 (le32_to_cpu(sb
->feature_map
) & ~MD_FEATURE_ALL
) != 0)
1181 if (calc_sb_1_csum(sb
) != sb
->sb_csum
) {
1182 printk("md: invalid superblock checksum on %s\n",
1183 bdevname(rdev
->bdev
,b
));
1186 if (le64_to_cpu(sb
->data_size
) < 10) {
1187 printk("md: data_size too small on %s\n",
1188 bdevname(rdev
->bdev
,b
));
1191 if ((le32_to_cpu(sb
->feature_map
) & MD_FEATURE_BITMAP_OFFSET
)) {
1192 if (sb
->level
!= cpu_to_le32(1) &&
1193 sb
->level
!= cpu_to_le32(4) &&
1194 sb
->level
!= cpu_to_le32(5) &&
1195 sb
->level
!= cpu_to_le32(6) &&
1196 sb
->level
!= cpu_to_le32(10)) {
1198 "md: bitmaps not supported for this level.\n");
1203 rdev
->preferred_minor
= 0xffff;
1204 rdev
->data_offset
= le64_to_cpu(sb
->data_offset
);
1205 atomic_set(&rdev
->corrected_errors
, le32_to_cpu(sb
->cnt_corrected_read
));
1207 rdev
->sb_size
= le32_to_cpu(sb
->max_dev
) * 2 + 256;
1208 bmask
= queue_logical_block_size(rdev
->bdev
->bd_disk
->queue
)-1;
1209 if (rdev
->sb_size
& bmask
)
1210 rdev
->sb_size
= (rdev
->sb_size
| bmask
) + 1;
1213 && rdev
->data_offset
< sb_start
+ (rdev
->sb_size
/512))
1216 if (sb
->level
== cpu_to_le32(LEVEL_MULTIPATH
))
1219 rdev
->desc_nr
= le32_to_cpu(sb
->dev_number
);
1225 struct mdp_superblock_1
*refsb
=
1226 (struct mdp_superblock_1
*)page_address(refdev
->sb_page
);
1228 if (memcmp(sb
->set_uuid
, refsb
->set_uuid
, 16) != 0 ||
1229 sb
->level
!= refsb
->level
||
1230 sb
->layout
!= refsb
->layout
||
1231 sb
->chunksize
!= refsb
->chunksize
) {
1232 printk(KERN_WARNING
"md: %s has strangely different"
1233 " superblock to %s\n",
1234 bdevname(rdev
->bdev
,b
),
1235 bdevname(refdev
->bdev
,b2
));
1238 ev1
= le64_to_cpu(sb
->events
);
1239 ev2
= le64_to_cpu(refsb
->events
);
1247 rdev
->sectors
= (rdev
->bdev
->bd_inode
->i_size
>> 9) -
1248 le64_to_cpu(sb
->data_offset
);
1250 rdev
->sectors
= rdev
->sb_start
;
1251 if (rdev
->sectors
< le64_to_cpu(sb
->data_size
))
1253 rdev
->sectors
= le64_to_cpu(sb
->data_size
);
1254 if (le32_to_cpu(sb
->chunksize
)) {
1255 int chunk_sects
= le32_to_cpu(sb
->chunksize
);
1256 sector_t chunks
= rdev
->sectors
;
1257 sector_div(chunks
, chunk_sects
);
1258 rdev
->sectors
= chunks
* chunk_sects
;
1261 if (le64_to_cpu(sb
->size
) > rdev
->sectors
)
1266 static int super_1_validate(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
1268 struct mdp_superblock_1
*sb
= (struct mdp_superblock_1
*)page_address(rdev
->sb_page
);
1269 __u64 ev1
= le64_to_cpu(sb
->events
);
1271 rdev
->raid_disk
= -1;
1272 clear_bit(Faulty
, &rdev
->flags
);
1273 clear_bit(In_sync
, &rdev
->flags
);
1274 clear_bit(WriteMostly
, &rdev
->flags
);
1275 clear_bit(BarriersNotsupp
, &rdev
->flags
);
1277 if (mddev
->raid_disks
== 0) {
1278 mddev
->major_version
= 1;
1279 mddev
->patch_version
= 0;
1280 mddev
->external
= 0;
1281 mddev
->chunk_sectors
= le32_to_cpu(sb
->chunksize
);
1282 mddev
->ctime
= le64_to_cpu(sb
->ctime
) & ((1ULL << 32)-1);
1283 mddev
->utime
= le64_to_cpu(sb
->utime
) & ((1ULL << 32)-1);
1284 mddev
->level
= le32_to_cpu(sb
->level
);
1285 mddev
->clevel
[0] = 0;
1286 mddev
->layout
= le32_to_cpu(sb
->layout
);
1287 mddev
->raid_disks
= le32_to_cpu(sb
->raid_disks
);
1288 mddev
->dev_sectors
= le64_to_cpu(sb
->size
);
1289 mddev
->events
= ev1
;
1290 mddev
->bitmap_offset
= 0;
1291 mddev
->default_bitmap_offset
= 1024 >> 9;
1293 mddev
->recovery_cp
= le64_to_cpu(sb
->resync_offset
);
1294 memcpy(mddev
->uuid
, sb
->set_uuid
, 16);
1296 mddev
->max_disks
= (4096-256)/2;
1298 if ((le32_to_cpu(sb
->feature_map
) & MD_FEATURE_BITMAP_OFFSET
) &&
1299 mddev
->bitmap_file
== NULL
)
1300 mddev
->bitmap_offset
= (__s32
)le32_to_cpu(sb
->bitmap_offset
);
1302 if ((le32_to_cpu(sb
->feature_map
) & MD_FEATURE_RESHAPE_ACTIVE
)) {
1303 mddev
->reshape_position
= le64_to_cpu(sb
->reshape_position
);
1304 mddev
->delta_disks
= le32_to_cpu(sb
->delta_disks
);
1305 mddev
->new_level
= le32_to_cpu(sb
->new_level
);
1306 mddev
->new_layout
= le32_to_cpu(sb
->new_layout
);
1307 mddev
->new_chunk_sectors
= le32_to_cpu(sb
->new_chunk
);
1309 mddev
->reshape_position
= MaxSector
;
1310 mddev
->delta_disks
= 0;
1311 mddev
->new_level
= mddev
->level
;
1312 mddev
->new_layout
= mddev
->layout
;
1313 mddev
->new_chunk_sectors
= mddev
->chunk_sectors
;
1316 } else if (mddev
->pers
== NULL
) {
1317 /* Insist of good event counter while assembling */
1319 if (ev1
< mddev
->events
)
1321 } else if (mddev
->bitmap
) {
1322 /* If adding to array with a bitmap, then we can accept an
1323 * older device, but not too old.
1325 if (ev1
< mddev
->bitmap
->events_cleared
)
1328 if (ev1
< mddev
->events
)
1329 /* just a hot-add of a new device, leave raid_disk at -1 */
1332 if (mddev
->level
!= LEVEL_MULTIPATH
) {
1334 role
= le16_to_cpu(sb
->dev_roles
[rdev
->desc_nr
]);
1336 case 0xffff: /* spare */
1338 case 0xfffe: /* faulty */
1339 set_bit(Faulty
, &rdev
->flags
);
1342 if ((le32_to_cpu(sb
->feature_map
) &
1343 MD_FEATURE_RECOVERY_OFFSET
))
1344 rdev
->recovery_offset
= le64_to_cpu(sb
->recovery_offset
);
1346 set_bit(In_sync
, &rdev
->flags
);
1347 rdev
->raid_disk
= role
;
1350 if (sb
->devflags
& WriteMostly1
)
1351 set_bit(WriteMostly
, &rdev
->flags
);
1352 } else /* MULTIPATH are always insync */
1353 set_bit(In_sync
, &rdev
->flags
);
1358 static void super_1_sync(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
1360 struct mdp_superblock_1
*sb
;
1363 /* make rdev->sb match mddev and rdev data. */
1365 sb
= (struct mdp_superblock_1
*)page_address(rdev
->sb_page
);
1367 sb
->feature_map
= 0;
1369 sb
->recovery_offset
= cpu_to_le64(0);
1370 memset(sb
->pad1
, 0, sizeof(sb
->pad1
));
1371 memset(sb
->pad2
, 0, sizeof(sb
->pad2
));
1372 memset(sb
->pad3
, 0, sizeof(sb
->pad3
));
1374 sb
->utime
= cpu_to_le64((__u64
)mddev
->utime
);
1375 sb
->events
= cpu_to_le64(mddev
->events
);
1377 sb
->resync_offset
= cpu_to_le64(mddev
->recovery_cp
);
1379 sb
->resync_offset
= cpu_to_le64(0);
1381 sb
->cnt_corrected_read
= cpu_to_le32(atomic_read(&rdev
->corrected_errors
));
1383 sb
->raid_disks
= cpu_to_le32(mddev
->raid_disks
);
1384 sb
->size
= cpu_to_le64(mddev
->dev_sectors
);
1385 sb
->chunksize
= cpu_to_le32(mddev
->chunk_sectors
);
1386 sb
->level
= cpu_to_le32(mddev
->level
);
1387 sb
->layout
= cpu_to_le32(mddev
->layout
);
1389 if (mddev
->bitmap
&& mddev
->bitmap_file
== NULL
) {
1390 sb
->bitmap_offset
= cpu_to_le32((__u32
)mddev
->bitmap_offset
);
1391 sb
->feature_map
= cpu_to_le32(MD_FEATURE_BITMAP_OFFSET
);
1394 if (rdev
->raid_disk
>= 0 &&
1395 !test_bit(In_sync
, &rdev
->flags
)) {
1396 if (mddev
->curr_resync_completed
> rdev
->recovery_offset
)
1397 rdev
->recovery_offset
= mddev
->curr_resync_completed
;
1398 if (rdev
->recovery_offset
> 0) {
1400 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET
);
1401 sb
->recovery_offset
=
1402 cpu_to_le64(rdev
->recovery_offset
);
1406 if (mddev
->reshape_position
!= MaxSector
) {
1407 sb
->feature_map
|= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE
);
1408 sb
->reshape_position
= cpu_to_le64(mddev
->reshape_position
);
1409 sb
->new_layout
= cpu_to_le32(mddev
->new_layout
);
1410 sb
->delta_disks
= cpu_to_le32(mddev
->delta_disks
);
1411 sb
->new_level
= cpu_to_le32(mddev
->new_level
);
1412 sb
->new_chunk
= cpu_to_le32(mddev
->new_chunk_sectors
);
1416 list_for_each_entry(rdev2
, &mddev
->disks
, same_set
)
1417 if (rdev2
->desc_nr
+1 > max_dev
)
1418 max_dev
= rdev2
->desc_nr
+1;
1420 if (max_dev
> le32_to_cpu(sb
->max_dev
))
1421 sb
->max_dev
= cpu_to_le32(max_dev
);
1422 for (i
=0; i
<max_dev
;i
++)
1423 sb
->dev_roles
[i
] = cpu_to_le16(0xfffe);
1425 list_for_each_entry(rdev2
, &mddev
->disks
, same_set
) {
1427 if (test_bit(Faulty
, &rdev2
->flags
))
1428 sb
->dev_roles
[i
] = cpu_to_le16(0xfffe);
1429 else if (test_bit(In_sync
, &rdev2
->flags
))
1430 sb
->dev_roles
[i
] = cpu_to_le16(rdev2
->raid_disk
);
1431 else if (rdev2
->raid_disk
>= 0 && rdev2
->recovery_offset
> 0)
1432 sb
->dev_roles
[i
] = cpu_to_le16(rdev2
->raid_disk
);
1434 sb
->dev_roles
[i
] = cpu_to_le16(0xffff);
1437 sb
->sb_csum
= calc_sb_1_csum(sb
);
1440 static unsigned long long
1441 super_1_rdev_size_change(mdk_rdev_t
*rdev
, sector_t num_sectors
)
1443 struct mdp_superblock_1
*sb
;
1444 sector_t max_sectors
;
1445 if (num_sectors
&& num_sectors
< rdev
->mddev
->dev_sectors
)
1446 return 0; /* component must fit device */
1447 if (rdev
->sb_start
< rdev
->data_offset
) {
1448 /* minor versions 1 and 2; superblock before data */
1449 max_sectors
= rdev
->bdev
->bd_inode
->i_size
>> 9;
1450 max_sectors
-= rdev
->data_offset
;
1451 if (!num_sectors
|| num_sectors
> max_sectors
)
1452 num_sectors
= max_sectors
;
1453 } else if (rdev
->mddev
->bitmap_offset
) {
1454 /* minor version 0 with bitmap we can't move */
1457 /* minor version 0; superblock after data */
1459 sb_start
= (rdev
->bdev
->bd_inode
->i_size
>> 9) - 8*2;
1460 sb_start
&= ~(sector_t
)(4*2 - 1);
1461 max_sectors
= rdev
->sectors
+ sb_start
- rdev
->sb_start
;
1462 if (!num_sectors
|| num_sectors
> max_sectors
)
1463 num_sectors
= max_sectors
;
1464 rdev
->sb_start
= sb_start
;
1466 sb
= (struct mdp_superblock_1
*) page_address(rdev
->sb_page
);
1467 sb
->data_size
= cpu_to_le64(num_sectors
);
1468 sb
->super_offset
= rdev
->sb_start
;
1469 sb
->sb_csum
= calc_sb_1_csum(sb
);
1470 md_super_write(rdev
->mddev
, rdev
, rdev
->sb_start
, rdev
->sb_size
,
1472 md_super_wait(rdev
->mddev
);
1473 return num_sectors
/ 2; /* kB for sysfs */
1476 static struct super_type super_types
[] = {
1479 .owner
= THIS_MODULE
,
1480 .load_super
= super_90_load
,
1481 .validate_super
= super_90_validate
,
1482 .sync_super
= super_90_sync
,
1483 .rdev_size_change
= super_90_rdev_size_change
,
1487 .owner
= THIS_MODULE
,
1488 .load_super
= super_1_load
,
1489 .validate_super
= super_1_validate
,
1490 .sync_super
= super_1_sync
,
1491 .rdev_size_change
= super_1_rdev_size_change
,
1495 static int match_mddev_units(mddev_t
*mddev1
, mddev_t
*mddev2
)
1497 mdk_rdev_t
*rdev
, *rdev2
;
1500 rdev_for_each_rcu(rdev
, mddev1
)
1501 rdev_for_each_rcu(rdev2
, mddev2
)
1502 if (rdev
->bdev
->bd_contains
==
1503 rdev2
->bdev
->bd_contains
) {
1511 static LIST_HEAD(pending_raid_disks
);
1513 static void md_integrity_check(mdk_rdev_t
*rdev
, mddev_t
*mddev
)
1515 struct mdk_personality
*pers
= mddev
->pers
;
1516 struct gendisk
*disk
= mddev
->gendisk
;
1517 struct blk_integrity
*bi_rdev
= bdev_get_integrity(rdev
->bdev
);
1518 struct blk_integrity
*bi_mddev
= blk_get_integrity(disk
);
1520 /* Data integrity passthrough not supported on RAID 4, 5 and 6 */
1521 if (pers
&& pers
->level
>= 4 && pers
->level
<= 6)
1524 /* If rdev is integrity capable, register profile for mddev */
1525 if (!bi_mddev
&& bi_rdev
) {
1526 if (blk_integrity_register(disk
, bi_rdev
))
1527 printk(KERN_ERR
"%s: %s Could not register integrity!\n",
1528 __func__
, disk
->disk_name
);
1530 printk(KERN_NOTICE
"Enabling data integrity on %s\n",
1535 /* Check that mddev and rdev have matching profiles */
1536 if (blk_integrity_compare(disk
, rdev
->bdev
->bd_disk
) < 0) {
1537 printk(KERN_ERR
"%s: %s/%s integrity mismatch!\n", __func__
,
1538 disk
->disk_name
, rdev
->bdev
->bd_disk
->disk_name
);
1539 printk(KERN_NOTICE
"Disabling data integrity on %s\n",
1541 blk_integrity_unregister(disk
);
1545 static int bind_rdev_to_array(mdk_rdev_t
* rdev
, mddev_t
* mddev
)
1547 char b
[BDEVNAME_SIZE
];
1557 /* prevent duplicates */
1558 if (find_rdev(mddev
, rdev
->bdev
->bd_dev
))
1561 /* make sure rdev->sectors exceeds mddev->dev_sectors */
1562 if (rdev
->sectors
&& (mddev
->dev_sectors
== 0 ||
1563 rdev
->sectors
< mddev
->dev_sectors
)) {
1565 /* Cannot change size, so fail
1566 * If mddev->level <= 0, then we don't care
1567 * about aligning sizes (e.g. linear)
1569 if (mddev
->level
> 0)
1572 mddev
->dev_sectors
= rdev
->sectors
;
1575 /* Verify rdev->desc_nr is unique.
1576 * If it is -1, assign a free number, else
1577 * check number is not in use
1579 if (rdev
->desc_nr
< 0) {
1581 if (mddev
->pers
) choice
= mddev
->raid_disks
;
1582 while (find_rdev_nr(mddev
, choice
))
1584 rdev
->desc_nr
= choice
;
1586 if (find_rdev_nr(mddev
, rdev
->desc_nr
))
1589 if (mddev
->max_disks
&& rdev
->desc_nr
>= mddev
->max_disks
) {
1590 printk(KERN_WARNING
"md: %s: array is limited to %d devices\n",
1591 mdname(mddev
), mddev
->max_disks
);
1594 bdevname(rdev
->bdev
,b
);
1595 while ( (s
=strchr(b
, '/')) != NULL
)
1598 rdev
->mddev
= mddev
;
1599 printk(KERN_INFO
"md: bind<%s>\n", b
);
1601 if ((err
= kobject_add(&rdev
->kobj
, &mddev
->kobj
, "dev-%s", b
)))
1604 ko
= &part_to_dev(rdev
->bdev
->bd_part
)->kobj
;
1605 if ((err
= sysfs_create_link(&rdev
->kobj
, ko
, "block"))) {
1606 kobject_del(&rdev
->kobj
);
1609 rdev
->sysfs_state
= sysfs_get_dirent(rdev
->kobj
.sd
, "state");
1611 list_add_rcu(&rdev
->same_set
, &mddev
->disks
);
1612 bd_claim_by_disk(rdev
->bdev
, rdev
->bdev
->bd_holder
, mddev
->gendisk
);
1614 /* May as well allow recovery to be retried once */
1615 mddev
->recovery_disabled
= 0;
1617 md_integrity_check(rdev
, mddev
);
1621 printk(KERN_WARNING
"md: failed to register dev-%s for %s\n",
1626 static void md_delayed_delete(struct work_struct
*ws
)
1628 mdk_rdev_t
*rdev
= container_of(ws
, mdk_rdev_t
, del_work
);
1629 kobject_del(&rdev
->kobj
);
1630 kobject_put(&rdev
->kobj
);
1633 static void unbind_rdev_from_array(mdk_rdev_t
* rdev
)
1635 char b
[BDEVNAME_SIZE
];
1640 bd_release_from_disk(rdev
->bdev
, rdev
->mddev
->gendisk
);
1641 list_del_rcu(&rdev
->same_set
);
1642 printk(KERN_INFO
"md: unbind<%s>\n", bdevname(rdev
->bdev
,b
));
1644 sysfs_remove_link(&rdev
->kobj
, "block");
1645 sysfs_put(rdev
->sysfs_state
);
1646 rdev
->sysfs_state
= NULL
;
1647 /* We need to delay this, otherwise we can deadlock when
1648 * writing to 'remove' to "dev/state". We also need
1649 * to delay it due to rcu usage.
1652 INIT_WORK(&rdev
->del_work
, md_delayed_delete
);
1653 kobject_get(&rdev
->kobj
);
1654 schedule_work(&rdev
->del_work
);
1658 * prevent the device from being mounted, repartitioned or
1659 * otherwise reused by a RAID array (or any other kernel
1660 * subsystem), by bd_claiming the device.
1662 static int lock_rdev(mdk_rdev_t
*rdev
, dev_t dev
, int shared
)
1665 struct block_device
*bdev
;
1666 char b
[BDEVNAME_SIZE
];
1668 bdev
= open_by_devnum(dev
, FMODE_READ
|FMODE_WRITE
);
1670 printk(KERN_ERR
"md: could not open %s.\n",
1671 __bdevname(dev
, b
));
1672 return PTR_ERR(bdev
);
1674 err
= bd_claim(bdev
, shared
? (mdk_rdev_t
*)lock_rdev
: rdev
);
1676 printk(KERN_ERR
"md: could not bd_claim %s.\n",
1678 blkdev_put(bdev
, FMODE_READ
|FMODE_WRITE
);
1682 set_bit(AllReserved
, &rdev
->flags
);
1687 static void unlock_rdev(mdk_rdev_t
*rdev
)
1689 struct block_device
*bdev
= rdev
->bdev
;
1694 blkdev_put(bdev
, FMODE_READ
|FMODE_WRITE
);
1697 void md_autodetect_dev(dev_t dev
);
1699 static void export_rdev(mdk_rdev_t
* rdev
)
1701 char b
[BDEVNAME_SIZE
];
1702 printk(KERN_INFO
"md: export_rdev(%s)\n",
1703 bdevname(rdev
->bdev
,b
));
1708 if (test_bit(AutoDetected
, &rdev
->flags
))
1709 md_autodetect_dev(rdev
->bdev
->bd_dev
);
1712 kobject_put(&rdev
->kobj
);
1715 static void kick_rdev_from_array(mdk_rdev_t
* rdev
)
1717 unbind_rdev_from_array(rdev
);
1721 static void export_array(mddev_t
*mddev
)
1723 mdk_rdev_t
*rdev
, *tmp
;
1725 rdev_for_each(rdev
, tmp
, mddev
) {
1730 kick_rdev_from_array(rdev
);
1732 if (!list_empty(&mddev
->disks
))
1734 mddev
->raid_disks
= 0;
1735 mddev
->major_version
= 0;
1738 static void print_desc(mdp_disk_t
*desc
)
1740 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc
->number
,
1741 desc
->major
,desc
->minor
,desc
->raid_disk
,desc
->state
);
1744 static void print_sb_90(mdp_super_t
*sb
)
1749 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1750 sb
->major_version
, sb
->minor_version
, sb
->patch_version
,
1751 sb
->set_uuid0
, sb
->set_uuid1
, sb
->set_uuid2
, sb
->set_uuid3
,
1753 printk(KERN_INFO
"md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1754 sb
->level
, sb
->size
, sb
->nr_disks
, sb
->raid_disks
,
1755 sb
->md_minor
, sb
->layout
, sb
->chunk_size
);
1756 printk(KERN_INFO
"md: UT:%08x ST:%d AD:%d WD:%d"
1757 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1758 sb
->utime
, sb
->state
, sb
->active_disks
, sb
->working_disks
,
1759 sb
->failed_disks
, sb
->spare_disks
,
1760 sb
->sb_csum
, (unsigned long)sb
->events_lo
);
1763 for (i
= 0; i
< MD_SB_DISKS
; i
++) {
1766 desc
= sb
->disks
+ i
;
1767 if (desc
->number
|| desc
->major
|| desc
->minor
||
1768 desc
->raid_disk
|| (desc
->state
&& (desc
->state
!= 4))) {
1769 printk(" D %2d: ", i
);
1773 printk(KERN_INFO
"md: THIS: ");
1774 print_desc(&sb
->this_disk
);
1777 static void print_sb_1(struct mdp_superblock_1
*sb
)
1781 uuid
= sb
->set_uuid
;
1782 printk(KERN_INFO
"md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
1783 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
1784 KERN_INFO
"md: Name: \"%s\" CT:%llu\n",
1785 le32_to_cpu(sb
->major_version
),
1786 le32_to_cpu(sb
->feature_map
),
1787 uuid
[0], uuid
[1], uuid
[2], uuid
[3],
1788 uuid
[4], uuid
[5], uuid
[6], uuid
[7],
1789 uuid
[8], uuid
[9], uuid
[10], uuid
[11],
1790 uuid
[12], uuid
[13], uuid
[14], uuid
[15],
1792 (unsigned long long)le64_to_cpu(sb
->ctime
)
1793 & MD_SUPERBLOCK_1_TIME_SEC_MASK
);
1795 uuid
= sb
->device_uuid
;
1796 printk(KERN_INFO
"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
1798 KERN_INFO
"md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
1799 ":%02x%02x%02x%02x%02x%02x\n"
1800 KERN_INFO
"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
1801 KERN_INFO
"md: (MaxDev:%u) \n",
1802 le32_to_cpu(sb
->level
),
1803 (unsigned long long)le64_to_cpu(sb
->size
),
1804 le32_to_cpu(sb
->raid_disks
),
1805 le32_to_cpu(sb
->layout
),
1806 le32_to_cpu(sb
->chunksize
),
1807 (unsigned long long)le64_to_cpu(sb
->data_offset
),
1808 (unsigned long long)le64_to_cpu(sb
->data_size
),
1809 (unsigned long long)le64_to_cpu(sb
->super_offset
),
1810 (unsigned long long)le64_to_cpu(sb
->recovery_offset
),
1811 le32_to_cpu(sb
->dev_number
),
1812 uuid
[0], uuid
[1], uuid
[2], uuid
[3],
1813 uuid
[4], uuid
[5], uuid
[6], uuid
[7],
1814 uuid
[8], uuid
[9], uuid
[10], uuid
[11],
1815 uuid
[12], uuid
[13], uuid
[14], uuid
[15],
1817 (unsigned long long)le64_to_cpu(sb
->utime
) & MD_SUPERBLOCK_1_TIME_SEC_MASK
,
1818 (unsigned long long)le64_to_cpu(sb
->events
),
1819 (unsigned long long)le64_to_cpu(sb
->resync_offset
),
1820 le32_to_cpu(sb
->sb_csum
),
1821 le32_to_cpu(sb
->max_dev
)
1825 static void print_rdev(mdk_rdev_t
*rdev
, int major_version
)
1827 char b
[BDEVNAME_SIZE
];
1828 printk(KERN_INFO
"md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
1829 bdevname(rdev
->bdev
, b
), (unsigned long long)rdev
->sectors
,
1830 test_bit(Faulty
, &rdev
->flags
), test_bit(In_sync
, &rdev
->flags
),
1832 if (rdev
->sb_loaded
) {
1833 printk(KERN_INFO
"md: rdev superblock (MJ:%d):\n", major_version
);
1834 switch (major_version
) {
1836 print_sb_90((mdp_super_t
*)page_address(rdev
->sb_page
));
1839 print_sb_1((struct mdp_superblock_1
*)page_address(rdev
->sb_page
));
1843 printk(KERN_INFO
"md: no rdev superblock!\n");
1846 static void md_print_devices(void)
1848 struct list_head
*tmp
;
1851 char b
[BDEVNAME_SIZE
];
1854 printk("md: **********************************\n");
1855 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1856 printk("md: **********************************\n");
1857 for_each_mddev(mddev
, tmp
) {
1860 bitmap_print_sb(mddev
->bitmap
);
1862 printk("%s: ", mdname(mddev
));
1863 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
1864 printk("<%s>", bdevname(rdev
->bdev
,b
));
1867 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
1868 print_rdev(rdev
, mddev
->major_version
);
1870 printk("md: **********************************\n");
1875 static void sync_sbs(mddev_t
* mddev
, int nospares
)
1877 /* Update each superblock (in-memory image), but
1878 * if we are allowed to, skip spares which already
1879 * have the right event counter, or have one earlier
1880 * (which would mean they aren't being marked as dirty
1881 * with the rest of the array)
1885 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
1886 if (rdev
->sb_events
== mddev
->events
||
1888 rdev
->raid_disk
< 0 &&
1889 (rdev
->sb_events
&1)==0 &&
1890 rdev
->sb_events
+1 == mddev
->events
)) {
1891 /* Don't update this superblock */
1892 rdev
->sb_loaded
= 2;
1894 super_types
[mddev
->major_version
].
1895 sync_super(mddev
, rdev
);
1896 rdev
->sb_loaded
= 1;
1901 static void md_update_sb(mddev_t
* mddev
, int force_change
)
1907 mddev
->utime
= get_seconds();
1908 if (mddev
->external
)
1911 spin_lock_irq(&mddev
->write_lock
);
1913 set_bit(MD_CHANGE_PENDING
, &mddev
->flags
);
1914 if (test_and_clear_bit(MD_CHANGE_DEVS
, &mddev
->flags
))
1916 if (test_and_clear_bit(MD_CHANGE_CLEAN
, &mddev
->flags
))
1917 /* just a clean<-> dirty transition, possibly leave spares alone,
1918 * though if events isn't the right even/odd, we will have to do
1924 if (mddev
->degraded
)
1925 /* If the array is degraded, then skipping spares is both
1926 * dangerous and fairly pointless.
1927 * Dangerous because a device that was removed from the array
1928 * might have a event_count that still looks up-to-date,
1929 * so it can be re-added without a resync.
1930 * Pointless because if there are any spares to skip,
1931 * then a recovery will happen and soon that array won't
1932 * be degraded any more and the spare can go back to sleep then.
1936 sync_req
= mddev
->in_sync
;
1938 /* If this is just a dirty<->clean transition, and the array is clean
1939 * and 'events' is odd, we can roll back to the previous clean state */
1941 && (mddev
->in_sync
&& mddev
->recovery_cp
== MaxSector
)
1942 && (mddev
->events
& 1)
1943 && mddev
->events
!= 1)
1946 /* otherwise we have to go forward and ... */
1948 if (!mddev
->in_sync
|| mddev
->recovery_cp
!= MaxSector
) { /* not clean */
1949 /* .. if the array isn't clean, insist on an odd 'events' */
1950 if ((mddev
->events
&1)==0) {
1955 /* otherwise insist on an even 'events' (for clean states) */
1956 if ((mddev
->events
&1)) {
1963 if (!mddev
->events
) {
1965 * oops, this 64-bit counter should never wrap.
1966 * Either we are in around ~1 trillion A.C., assuming
1967 * 1 reboot per second, or we have a bug:
1974 * do not write anything to disk if using
1975 * nonpersistent superblocks
1977 if (!mddev
->persistent
) {
1978 if (!mddev
->external
)
1979 clear_bit(MD_CHANGE_PENDING
, &mddev
->flags
);
1981 spin_unlock_irq(&mddev
->write_lock
);
1982 wake_up(&mddev
->sb_wait
);
1985 sync_sbs(mddev
, nospares
);
1986 spin_unlock_irq(&mddev
->write_lock
);
1989 "md: updating %s RAID superblock on device (in sync %d)\n",
1990 mdname(mddev
),mddev
->in_sync
);
1992 bitmap_update_sb(mddev
->bitmap
);
1993 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
1994 char b
[BDEVNAME_SIZE
];
1995 dprintk(KERN_INFO
"md: ");
1996 if (rdev
->sb_loaded
!= 1)
1997 continue; /* no noise on spare devices */
1998 if (test_bit(Faulty
, &rdev
->flags
))
1999 dprintk("(skipping faulty ");
2001 dprintk("%s ", bdevname(rdev
->bdev
,b
));
2002 if (!test_bit(Faulty
, &rdev
->flags
)) {
2003 md_super_write(mddev
,rdev
,
2004 rdev
->sb_start
, rdev
->sb_size
,
2006 dprintk(KERN_INFO
"(write) %s's sb offset: %llu\n",
2007 bdevname(rdev
->bdev
,b
),
2008 (unsigned long long)rdev
->sb_start
);
2009 rdev
->sb_events
= mddev
->events
;
2013 if (mddev
->level
== LEVEL_MULTIPATH
)
2014 /* only need to write one superblock... */
2017 md_super_wait(mddev
);
2018 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2020 spin_lock_irq(&mddev
->write_lock
);
2021 if (mddev
->in_sync
!= sync_req
||
2022 test_bit(MD_CHANGE_DEVS
, &mddev
->flags
)) {
2023 /* have to write it out again */
2024 spin_unlock_irq(&mddev
->write_lock
);
2027 clear_bit(MD_CHANGE_PENDING
, &mddev
->flags
);
2028 spin_unlock_irq(&mddev
->write_lock
);
2029 wake_up(&mddev
->sb_wait
);
2030 if (test_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
))
2031 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
2035 /* words written to sysfs files may, or may not, be \n terminated.
2036 * We want to accept with case. For this we use cmd_match.
2038 static int cmd_match(const char *cmd
, const char *str
)
2040 /* See if cmd, written into a sysfs file, matches
2041 * str. They must either be the same, or cmd can
2042 * have a trailing newline
2044 while (*cmd
&& *str
&& *cmd
== *str
) {
2055 struct rdev_sysfs_entry
{
2056 struct attribute attr
;
2057 ssize_t (*show
)(mdk_rdev_t
*, char *);
2058 ssize_t (*store
)(mdk_rdev_t
*, const char *, size_t);
2062 state_show(mdk_rdev_t
*rdev
, char *page
)
2067 if (test_bit(Faulty
, &rdev
->flags
)) {
2068 len
+= sprintf(page
+len
, "%sfaulty",sep
);
2071 if (test_bit(In_sync
, &rdev
->flags
)) {
2072 len
+= sprintf(page
+len
, "%sin_sync",sep
);
2075 if (test_bit(WriteMostly
, &rdev
->flags
)) {
2076 len
+= sprintf(page
+len
, "%swrite_mostly",sep
);
2079 if (test_bit(Blocked
, &rdev
->flags
)) {
2080 len
+= sprintf(page
+len
, "%sblocked", sep
);
2083 if (!test_bit(Faulty
, &rdev
->flags
) &&
2084 !test_bit(In_sync
, &rdev
->flags
)) {
2085 len
+= sprintf(page
+len
, "%sspare", sep
);
2088 return len
+sprintf(page
+len
, "\n");
2092 state_store(mdk_rdev_t
*rdev
, const char *buf
, size_t len
)
2095 * faulty - simulates and error
2096 * remove - disconnects the device
2097 * writemostly - sets write_mostly
2098 * -writemostly - clears write_mostly
2099 * blocked - sets the Blocked flag
2100 * -blocked - clears the Blocked flag
2101 * insync - sets Insync providing device isn't active
2104 if (cmd_match(buf
, "faulty") && rdev
->mddev
->pers
) {
2105 md_error(rdev
->mddev
, rdev
);
2107 } else if (cmd_match(buf
, "remove")) {
2108 if (rdev
->raid_disk
>= 0)
2111 mddev_t
*mddev
= rdev
->mddev
;
2112 kick_rdev_from_array(rdev
);
2114 md_update_sb(mddev
, 1);
2115 md_new_event(mddev
);
2118 } else if (cmd_match(buf
, "writemostly")) {
2119 set_bit(WriteMostly
, &rdev
->flags
);
2121 } else if (cmd_match(buf
, "-writemostly")) {
2122 clear_bit(WriteMostly
, &rdev
->flags
);
2124 } else if (cmd_match(buf
, "blocked")) {
2125 set_bit(Blocked
, &rdev
->flags
);
2127 } else if (cmd_match(buf
, "-blocked")) {
2128 clear_bit(Blocked
, &rdev
->flags
);
2129 wake_up(&rdev
->blocked_wait
);
2130 set_bit(MD_RECOVERY_NEEDED
, &rdev
->mddev
->recovery
);
2131 md_wakeup_thread(rdev
->mddev
->thread
);
2134 } else if (cmd_match(buf
, "insync") && rdev
->raid_disk
== -1) {
2135 set_bit(In_sync
, &rdev
->flags
);
2138 if (!err
&& rdev
->sysfs_state
)
2139 sysfs_notify_dirent(rdev
->sysfs_state
);
2140 return err
? err
: len
;
2142 static struct rdev_sysfs_entry rdev_state
=
2143 __ATTR(state
, S_IRUGO
|S_IWUSR
, state_show
, state_store
);
2146 errors_show(mdk_rdev_t
*rdev
, char *page
)
2148 return sprintf(page
, "%d\n", atomic_read(&rdev
->corrected_errors
));
2152 errors_store(mdk_rdev_t
*rdev
, const char *buf
, size_t len
)
2155 unsigned long n
= simple_strtoul(buf
, &e
, 10);
2156 if (*buf
&& (*e
== 0 || *e
== '\n')) {
2157 atomic_set(&rdev
->corrected_errors
, n
);
2162 static struct rdev_sysfs_entry rdev_errors
=
2163 __ATTR(errors
, S_IRUGO
|S_IWUSR
, errors_show
, errors_store
);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	char nm[20];
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		mdk_rdev_t *rdev2;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify_dirent(rdev->sysfs_state);
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			printk(KERN_WARNING
			       "md: cannot register "
			       "%s for %s\n",
			       nm, mdname(rdev->mddev));

		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent(rdev->sysfs_state);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->sectors && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);

static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (strict_strtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	mddev_t *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (rdev->bdev->bd_inode->i_size >> 9) -
				rdev->data_offset;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->sectors, and if
		 * we have to change it back, we will have the lock again.
		 */
		mddev_t *mddev;
		int overlap = 0;
		struct list_head *tmp;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			mdk_rdev_t *rdev2;

			mddev_lock(mddev);
			list_for_each_entry(rdev2, &mddev->disks, same_set)
				if (test_bit(AllReserved, &rdev2->flags) ||
				    (rdev->bdev == rdev2->bdev &&
				     rdev != rdev2 &&
				     overlaps(rdev->data_offset, rdev->sectors,
					      rdev2->data_offset,
					      rdev2->sectors))) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
			       super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(mddev_t * mddev)
{
	int i;
	mdk_rdev_t *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}


	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->desc_nr >= mddev->max_disks ||
		    i > mddev->max_disks) {
			printk(KERN_WARNING
			       "md: %s: %s: only %d devices permitted\n",
			       mdname(mddev), bdevname(rdev->bdev, b),
			       mddev->max_disks);
			kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= mddev->raid_disks) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}
}
static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale=1;
	int dot=0;
	int i;
	unsigned long msec;
	char buf[30];

	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, sizeof(buf));
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot=1;
			buf[i] = 0;
		}
	}
	if (strict_strtoul(buf, 10, &msec) < 0)
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
		if (mddev->safemode_delay < old_delay)
			md_safemode_timeout((unsigned long)mddev);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	char level[16];
	ssize_t rv = len;
	struct mdk_personality *pers;
	void *priv;

	if (mddev->pers == NULL) {
		/* just setting the level name */
		if (len == 0)
			return 0;
		if (len >= sizeof(mddev->clevel))
			return -ENOSPC;
		strncpy(mddev->clevel, buf, len);
		if (mddev->clevel[len-1] == '\n')
			len--;
		mddev->clevel[len] = 0;
		mddev->level = LEVEL_NONE;
		return rv;
	}

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will access other array.
	 */

	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;

	if (!mddev->pers->quiesce) {
		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
		       mdname(mddev), mddev->pers->name);
		return -EINVAL;
	}

	/* Now find the new personality */
	if (len == 0 || len >= sizeof(level))
		return -EINVAL;
	strncpy(level, buf, len);
	if (level[len-1] == '\n')
		len--;
	level[len] = 0;

	request_module("md-%s", level);
	spin_lock(&pers_lock);
	pers = find_pers(LEVEL_NONE, level);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		printk(KERN_WARNING "md: personality %s not loaded\n", level);
		return -EINVAL;
	}
	spin_unlock(&pers_lock);

	if (pers == mddev->pers) {
		/* Nothing to do! */
		module_put(pers->owner);
		return rv;
	}
	if (!pers->takeover) {
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
		       mdname(mddev), level);
		return -EINVAL;
	}

	/* ->takeover must set new_* and/or delta_disks
	 * if it succeeds, and may set them when it fails.
	 */
	priv = pers->takeover(mddev);
	if (IS_ERR(priv)) {
		mddev->new_level = mddev->level;
		mddev->new_layout = mddev->layout;
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->raid_disks -= mddev->delta_disks;
		mddev->delta_disks = 0;
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s would not accept array\n",
		       mdname(mddev), level);
		return PTR_ERR(priv);
	}

	/* Looks like we have a winner */
	mddev_suspend(mddev);
	mddev->pers->stop(mddev);
	module_put(mddev->pers->owner);
	mddev->pers = pers;
	mddev->private = priv;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->delta_disks = 0;
	pers->run(mddev);
	mddev_resume(mddev);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_layout = n;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_layout = mddev->layout;
			return err;
		}
	} else {
		mddev->new_layout = n;
		if (mddev->reshape_position == MaxSector)
			mddev->layout = n;
	}
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_sectors != mddev->new_chunk_sectors)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_chunk_sectors << 9,
			       mddev->chunk_sectors << 9);
	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_chunk_sectors = n >> 9;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_chunk_sectors = mddev->chunk_sectors;
			return err;
		}
	} else {
		mddev->new_chunk_sectors = n >> 9;
		if (mddev->reshape_position == MaxSector)
			mddev->chunk_sectors = n >> 9;
	}
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	if (mddev->recovery_cp == MaxSector)
		return sprintf(page, "none\n");
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };

static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}
static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(mddev_t * mddev, int ro, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->openers) > 0)
			return -EBUSY;
		err = do_md_stop(mddev, 0, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->openers) > 0)
				return -EBUSY;
			err = do_md_stop(mddev, 2, 0);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1, 0);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = do_md_stop(mddev, 1, 0);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					if (mddev->persistent)
						set_bit(MD_CHANGE_CLEAN,
							&mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			if (mddev->external)
				clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		sysfs_notify_dirent(mddev->sysfs_state);
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;


	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long chunk, end_chunk;
	char *end;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(mddev_t *mddev, sector_t num_sectors);

static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent(mddev->sysfs_action);
	return len;
}

static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);

static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);

static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
	unsigned long resync, dt, db;
	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
	unsigned long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_sectors) {
		sector_t temp = min;
		if (sector_div(temp, mddev->chunk_sectors))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_sectors) {
			sector_t temp = max;
			if (sector_div(temp, mddev->chunk_sectors))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if (new >= mddev->suspend_hi ||
	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
		mddev->suspend_lo = new;
		mddev->pers->quiesce(mddev, 2);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
		mddev->suspend_hi = new;
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);

static ssize_t
array_size_show(mddev_t *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	if (mddev->pers) {
		struct block_device *bdev = bdget_disk(mddev->gendisk, 0);

		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)mddev->array_sectors << 9);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
	}

	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_array_size.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev_lock(mddev);
	if (mddev->hold_active == UNTIL_IOCTL)
		mddev->hold_active = 0;
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static void mddev_delayed_delete(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, del_work);

	if (mddev->private == &md_redundancy_group) {
		sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
		if (mddev->sysfs_action)
			sysfs_put(mddev->sysfs_action);
		mddev->sysfs_action = NULL;
		mddev->private = NULL;
	}
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}
static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device
	 * to be completely removed (mddev_delayed_delete).
	 */
	flush_scheduled_work();

	mutex_lock(&disks_mutex);
	if (mddev->gendisk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return -EEXIST;
	}

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		mddev_t *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				mutex_unlock(&disks_mutex);
				mddev_put(mddev);
				return -EEXIST;
			}
		spin_unlock(&all_mddevs_lock);
	}

	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return -ENOMEM;
	}
	mddev->queue->queuedata = mddev;

	/* Can be unlocked because the queue is new: no concurrency */
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);

	blk_queue_make_request(mddev->queue, md_make_request);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		mutex_unlock(&disks_mutex);
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		mddev_put(mddev);
		return -ENOMEM;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	add_disk(disk);
	mddev->gendisk = disk;
	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	mutex_unlock(&disks_mutex);
	if (error)
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
	else {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return 0;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	md_alloc(dev, NULL);
	return NULL;
}

static int add_named_array(const char *val, struct kernel_param *kp)
{
	/* val must be "md_*" where * is not all digits.
	 * We allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) != 0)
		return -EINVAL;
	return md_alloc(0, buf);
}
static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}
;
3983 static int do_md_run(mddev_t
* mddev
)
3988 struct gendisk
*disk
;
3989 struct mdk_personality
*pers
;
3990 char b
[BDEVNAME_SIZE
];
3992 if (list_empty(&mddev
->disks
))
3993 /* cannot run an array with no devices.. */
4000 * Analyze all RAID superblock(s)
4002 if (!mddev
->raid_disks
) {
4003 if (!mddev
->persistent
)
4008 chunk_size
= mddev
->chunk_sectors
<< 9;
4011 if (chunk_size
> MAX_CHUNK_SIZE
) {
4012 printk(KERN_ERR
"too big chunk_size: %d > %d\n",
4013 chunk_size
, MAX_CHUNK_SIZE
);
4016 /* devices must have minimum size of one chunk */
4017 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
4018 if (test_bit(Faulty
, &rdev
->flags
))
4020 if (rdev
->sectors
< chunk_size
/ 512) {
4022 "md: Dev %s smaller than chunk_size:"
4024 bdevname(rdev
->bdev
,b
),
4025 (unsigned long long)rdev
->sectors
,
4032 if (mddev
->level
!= LEVEL_NONE
)
4033 request_module("md-level-%d", mddev
->level
);
4034 else if (mddev
->clevel
[0])
4035 request_module("md-%s", mddev
->clevel
);
4038 * Drop all container device buffers, from now on
4039 * the only valid external interface is through the md
4042 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
4043 if (test_bit(Faulty
, &rdev
->flags
))
4045 sync_blockdev(rdev
->bdev
);
4046 invalidate_bdev(rdev
->bdev
);
4048 /* perform some consistency tests on the device.
4049 * We don't want the data to overlap the metadata,
4050 * Internal Bitmap issues have been handled elsewhere.
4052 if (rdev
->data_offset
< rdev
->sb_start
) {
4053 if (mddev
->dev_sectors
&&
4054 rdev
->data_offset
+ mddev
->dev_sectors
4056 printk("md: %s: data overlaps metadata\n",
4061 if (rdev
->sb_start
+ rdev
->sb_size
/512
4062 > rdev
->data_offset
) {
4063 printk("md: %s: metadata overlaps data\n",
4068 sysfs_notify_dirent(rdev
->sysfs_state
);
4071 md_probe(mddev
->unit
, NULL
, NULL
);
4072 disk
= mddev
->gendisk
;
4076 spin_lock(&pers_lock
);
4077 pers
= find_pers(mddev
->level
, mddev
->clevel
);
4078 if (!pers
|| !try_module_get(pers
->owner
)) {
4079 spin_unlock(&pers_lock
);
4080 if (mddev
->level
!= LEVEL_NONE
)
4081 printk(KERN_WARNING
"md: personality for level %d is not loaded!\n",
4084 printk(KERN_WARNING
"md: personality for level %s is not loaded!\n",
4089 spin_unlock(&pers_lock
);
4090 if (mddev
->level
!= pers
->level
) {
4091 mddev
->level
= pers
->level
;
4092 mddev
->new_level
= pers
->level
;
4094 strlcpy(mddev
->clevel
, pers
->name
, sizeof(mddev
->clevel
));
4096 if (pers
->level
>= 4 && pers
->level
<= 6)
4097 /* Cannot support integrity (yet) */
4098 blk_integrity_unregister(mddev
->gendisk
);
4100 if (mddev
->reshape_position
!= MaxSector
&&
4101 pers
->start_reshape
== NULL
) {
4102 /* This personality cannot handle reshaping... */
4104 module_put(pers
->owner
);
4108 if (pers
->sync_request
) {
4109 /* Warn if this is a potentially silly
4112 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
4116 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
4117 list_for_each_entry(rdev2
, &mddev
->disks
, same_set
) {
4119 rdev
->bdev
->bd_contains
==
4120 rdev2
->bdev
->bd_contains
) {
4122 "%s: WARNING: %s appears to be"
4123 " on the same physical disk as"
4126 bdevname(rdev
->bdev
,b
),
4127 bdevname(rdev2
->bdev
,b2
));
4134 "True protection against single-disk"
4135 " failure might be compromised.\n");
4138 mddev
->recovery
= 0;
4139 /* may be over-ridden by personality */
4140 mddev
->resync_max_sectors
= mddev
->dev_sectors
;
4142 mddev
->barriers_work
= 1;
4143 mddev
->ok_start_degraded
= start_dirty_degraded
;
4146 mddev
->ro
= 2; /* read-only, but switch on first write */
4148 err
= mddev
->pers
->run(mddev
);
4150 printk(KERN_ERR
"md: pers->run() failed ...\n");
4151 else if (mddev
->pers
->size(mddev
, 0, 0) < mddev
->array_sectors
) {
4152 WARN_ONCE(!mddev
->external_size
, "%s: default size too small,"
4153 " but 'external_size' not in effect?\n", __func__
);
4155 "md: invalid array_size %llu > default size %llu\n",
4156 (unsigned long long)mddev
->array_sectors
/ 2,
4157 (unsigned long long)mddev
->pers
->size(mddev
, 0, 0) / 2);
4159 mddev
->pers
->stop(mddev
);
4161 if (err
== 0 && mddev
->pers
->sync_request
) {
4162 err
= bitmap_create(mddev
);
4164 printk(KERN_ERR
"%s: failed to create bitmap (%d)\n",
4165 mdname(mddev
), err
);
4166 mddev
->pers
->stop(mddev
);
4170 module_put(mddev
->pers
->owner
);
4172 bitmap_destroy(mddev
);
4175 if (mddev
->pers
->sync_request
) {
4176 if (sysfs_create_group(&mddev
->kobj
, &md_redundancy_group
))
4178 "md: cannot register extra attributes for %s\n",
4180 mddev
->sysfs_action
= sysfs_get_dirent(mddev
->kobj
.sd
, "sync_action");
4181 } else if (mddev
->ro
== 2) /* auto-readonly not meaningful */
4184 atomic_set(&mddev
->writes_pending
,0);
4185 mddev
->safemode
= 0;
4186 mddev
->safemode_timer
.function
= md_safemode_timeout
;
4187 mddev
->safemode_timer
.data
= (unsigned long) mddev
;
4188 mddev
->safemode_delay
= (200 * HZ
)/1000 +1; /* 200 msec delay */
4191 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
4192 if (rdev
->raid_disk
>= 0) {
4194 sprintf(nm
, "rd%d", rdev
->raid_disk
);
4195 if (sysfs_create_link(&mddev
->kobj
, &rdev
->kobj
, nm
))
4196 printk("md: cannot register %s for %s\n",
4200 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
4203 md_update_sb(mddev
, 0);
4205 set_capacity(disk
, mddev
->array_sectors
);
4207 /* If there is a partially-recovered drive we need to
4208 * start recovery here. If we leave it to md_check_recovery,
4209 * it will remove the drives and not do the right thing
4211 if (mddev
->degraded
&& !mddev
->sync_thread
) {
4213 list_for_each_entry(rdev
, &mddev
->disks
, same_set
)
4214 if (rdev
->raid_disk
>= 0 &&
4215 !test_bit(In_sync
, &rdev
->flags
) &&
4216 !test_bit(Faulty
, &rdev
->flags
))
4217 /* complete an interrupted recovery */
4219 if (spares
&& mddev
->pers
->sync_request
) {
4220 mddev
->recovery
= 0;
4221 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
4222 mddev
->sync_thread
= md_register_thread(md_do_sync
,
4225 if (!mddev
->sync_thread
) {
4226 printk(KERN_ERR
"%s: could not start resync"
4229 /* leave the spares where they are, it shouldn't hurt */
4230 mddev
->recovery
= 0;
4234 md_wakeup_thread(mddev
->thread
);
4235 md_wakeup_thread(mddev
->sync_thread
); /* possibly kick off a reshape */
4238 md_new_event(mddev
);
4239 sysfs_notify_dirent(mddev
->sysfs_state
);
4240 if (mddev
->sysfs_action
)
4241 sysfs_notify_dirent(mddev
->sysfs_action
);
4242 sysfs_notify(&mddev
->kobj
, NULL
, "degraded");
4243 kobject_uevent(&disk_to_dev(mddev
->gendisk
)->kobj
, KOBJ_CHANGE
);
static int restart_array(mddev_t *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
		mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent(mddev->sysfs_state);
	return 0;
}

/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

static void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
/* mode:
 *   0 - completely stop and dis-assemble array
 *   1 - switch to readonly
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
	int err = 0;
	struct gendisk *disk = mddev->gendisk;
	mdk_rdev_t *rdev;

	if (atomic_read(&mddev->openers) > is_open) {
		printk("md: %s still in use.\n",mdname(mddev));
		return -EBUSY;
	}

	if (mddev->pers) {

		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
		}

		del_timer_sync(&mddev->safemode_timer);

		switch(mode) {
		case 1: /* readonly */
			err = -ENXIO;
			if (mddev->ro==1)
				goto out;
			mddev->ro = 1;
			break;
		case 0: /* disassemble */
		case 2: /* stop */
			bitmap_flush(mddev);
			md_super_wait(mddev);
			if (mddev->ro)
				set_disk_ro(disk, 0);

			mddev->pers->stop(mddev);
			mddev->queue->merge_bvec_fn = NULL;
			mddev->queue->unplug_fn = NULL;
			mddev->queue->backing_dev_info.congested_fn = NULL;
			module_put(mddev->pers->owner);
			if (mddev->pers->sync_request)
				mddev->private = &md_redundancy_group;
			mddev->pers = NULL;
			/* tell userspace to handle 'inactive' */
			sysfs_notify_dirent(mddev->sysfs_state);

			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
				}

			set_capacity(disk, 0);

			if (mddev->ro)
				mddev->ro = 0;
		}
		if (!mddev->in_sync || mddev->flags) {
			/* mark array as shutdown cleanly */
			mddev->in_sync = 1;
			md_update_sb(mddev, 1);
		}
		if (mode == 1)
			set_disk_ro(disk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	}

	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {

		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
		}
		mddev->bitmap_offset = 0;

		/* make sure all md_delayed_delete calls have finished */
		flush_scheduled_work();

		export_array(mddev);

		mddev->array_sectors = 0;
		mddev->external_size = 0;
		mddev->dev_sectors = 0;
		mddev->raid_disks = 0;
		mddev->recovery_cp = 0;
		mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
		mddev->reshape_position = MaxSector;
		mddev->external = 0;
		mddev->persistent = 0;
		mddev->level = LEVEL_NONE;
		mddev->clevel[0] = 0;
		mddev->flags = 0;
		mddev->ro = 0;
		mddev->metadata_type[0] = 0;
		mddev->chunk_sectors = 0;
		mddev->ctime = mddev->utime = 0;
		mddev->layout = 0;
		mddev->max_disks = 0;
		mddev->events = 0;
		mddev->delta_disks = 0;
		mddev->new_level = LEVEL_NONE;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		mddev->curr_resync = 0;
		mddev->resync_mismatches = 0;
		mddev->suspend_lo = mddev->suspend_hi = 0;
		mddev->sync_speed_min = mddev->sync_speed_max = 0;
		mddev->recovery = 0;
		mddev->in_sync = 0;
		mddev->degraded = 0;
		mddev->barriers_work = 0;
		mddev->safemode = 0;
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;

	} else if (mddev->pers)
		printk(KERN_INFO "md: %s switched to read-only mode.\n",
			mdname(mddev));
	err = 0;
	blk_integrity_unregister(disk);
	md_new_event(mddev);
	sysfs_notify_dirent(mddev->sysfs_state);
out:
	return err;
}
#ifndef MODULE
static void autorun_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run(mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, 0);
	}
}

/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	mdk_rdev_t *rdev0, *rdev, *tmp;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
					 mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
* mddev
, void __user
* arg
)
4577 mdu_array_info_t info
;
4578 int nr
,working
,active
,failed
,spare
;
4581 nr
=working
=active
=failed
=spare
=0;
4582 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
4584 if (test_bit(Faulty
, &rdev
->flags
))
4588 if (test_bit(In_sync
, &rdev
->flags
))
4595 info
.major_version
= mddev
->major_version
;
4596 info
.minor_version
= mddev
->minor_version
;
4597 info
.patch_version
= MD_PATCHLEVEL_VERSION
;
4598 info
.ctime
= mddev
->ctime
;
4599 info
.level
= mddev
->level
;
4600 info
.size
= mddev
->dev_sectors
/ 2;
4601 if (info
.size
!= mddev
->dev_sectors
/ 2) /* overflow */
4604 info
.raid_disks
= mddev
->raid_disks
;
4605 info
.md_minor
= mddev
->md_minor
;
4606 info
.not_persistent
= !mddev
->persistent
;
4608 info
.utime
= mddev
->utime
;
4611 info
.state
= (1<<MD_SB_CLEAN
);
4612 if (mddev
->bitmap
&& mddev
->bitmap_offset
)
4613 info
.state
= (1<<MD_SB_BITMAP_PRESENT
);
4614 info
.active_disks
= active
;
4615 info
.working_disks
= working
;
4616 info
.failed_disks
= failed
;
4617 info
.spare_disks
= spare
;
4619 info
.layout
= mddev
->layout
;
4620 info
.chunk_size
= mddev
->chunk_sectors
<< 9;
4622 if (copy_to_user(arg
, &info
, sizeof(info
)))
4628 static int get_bitmap_file(mddev_t
* mddev
, void __user
* arg
)
4630 mdu_bitmap_file_t
*file
= NULL
; /* too big for stack allocation */
4631 char *ptr
, *buf
= NULL
;
4634 if (md_allow_write(mddev
))
4635 file
= kmalloc(sizeof(*file
), GFP_NOIO
);
4637 file
= kmalloc(sizeof(*file
), GFP_KERNEL
);
4642 /* bitmap disabled, zero the first byte and copy out */
4643 if (!mddev
->bitmap
|| !mddev
->bitmap
->file
) {
4644 file
->pathname
[0] = '\0';
4648 buf
= kmalloc(sizeof(file
->pathname
), GFP_KERNEL
);
4652 ptr
= d_path(&mddev
->bitmap
->file
->f_path
, buf
, sizeof(file
->pathname
));
4656 strcpy(file
->pathname
, ptr
);
4660 if (copy_to_user(arg
, file
, sizeof(*file
)))
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rdev = find_rdev_nr(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
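/*
 * Userspace enumerates members by looping 'number' over the slots and
 * calling GET_DISK_INFO for each; empty slots come back with
 * state == (1<<MD_DISK_REMOVED) rather than an error.  A hedged sketch,
 * assuming an open fd:
 *
 *	mdu_disk_info_t dinfo;
 *	int i;
 *	for (i = 0; i < MD_SB_DISKS; i++) {
 *		dinfo.number = i;
 *		if (ioctl(fd, GET_DISK_INFO, &dinfo) != 0)
 *			break;
 *		if (!(dinfo.state & (1 << MD_DISK_REMOVED)))
 *			printf("slot %d -> dev %d:%d\n",
 *			       i, dinfo.major, dinfo.minor);
 *	}
 */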
static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: md_import_device returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
							mdk_rdev_t, same_set);
			int err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
				       "md: %s has different UUID to %s\n",
				       bdevname(rdev->bdev,b),
				       bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
			       "%s: personality does not support diskops!\n",
			       mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: md_import_device returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set save_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC)  &&
			    info->raid_disk < mddev->raid_disks)
				rdev->raid_disk = info->raid_disk;
			else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		rdev->saved_raid_disk = rdev->raid_disk;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks are for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent(rdev->sysfs_state);

		md_update_sb(mddev, 1);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: error, md_import_device() returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
		rdev->sectors = calc_num_sectors(rdev,
						 mddev->chunk_sectors << 9);

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
	       bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}
static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
		       " version-0 superblocks.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
		       "%s: personality does not support diskops!\n",
		       mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
		       "md: error, md_import_device() returned %ld\n",
		       PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	else
		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;

	rdev->sectors = calc_num_sectors(rdev, mddev->chunk_sectors << 9);

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
		       "md: can not hot-add faulty %s disk to %s!\n",
		       bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */
	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}
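/*
 * Unlike ADD_NEW_DISK, HOT_ADD_DISK takes the raw device number as the
 * ioctl argument rather than an info structure, which is why md_ioctl()
 * below runs the argument through new_decode_dev().  A hedged userspace
 * sketch (device numbers are made up for illustration):
 *
 *	#include <sys/sysmacros.h>
 *	ioctl(fd, HOT_ADD_DISK, (unsigned long)makedev(8, 16));
 */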
static int set_bitmap_file(mddev_t *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_file = fget(fd);

		if (mddev->bitmap_file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
			return err;
		}
		mddev->bitmap_offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0)
			err = bitmap_create(mddev);
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
		}
		mddev->bitmap_file = NULL;
	}

	return err;
}
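/*
 * The fd argument doubles as the command: a valid descriptor for a
 * pre-created, correctly sized file attaches a file-backed write-intent
 * bitmap, while a negative fd tears the current one down.  A hedged
 * sketch (the bitmap path is an assumption for illustration):
 *
 *	int bfd = open("/var/md0-bitmap", O_RDWR);
 *	ioctl(fd, SET_BITMAP_FILE, bfd);	// attach
 *	ioctl(fd, SET_BITMAP_FILE, -1);		// detach
 */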
/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent,layout,chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{
	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
			       "md: superblock version %d not known\n",
			       info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->dev_sectors   = 2 * (sector_t)info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
	mddev->external	     = 0;

	mddev->layout        = info->layout;
	mddev->chunk_sectors = info->chunk_size >> 9;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
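/*
 * Putting the pieces together, the classic ioctl sequence for creating a
 * fresh version-0.90 array is SET_ARRAY_INFO (raid_disks > 0), one
 * ADD_NEW_DISK per member, then RUN_ARRAY.  A hedged userspace sketch;
 * the device numbers and geometry are made up for illustration:
 *
 *	mdu_array_info_t ainfo = { 0 };
 *	mdu_disk_info_t dinfo = { 0 };
 *
 *	ainfo.level = 1;
 *	ainfo.raid_disks = 2;
 *	ainfo.size = 1024 * 1024;		// per-device size in KB
 *	ioctl(fd, SET_ARRAY_INFO, &ainfo);
 *
 *	dinfo.number = 0; dinfo.raid_disk = 0;
 *	dinfo.major = 8; dinfo.minor = 1;	// e.g. /dev/sda1
 *	dinfo.state = (1 << MD_DISK_ACTIVE) | (1 << MD_DISK_SYNC);
 *	ioctl(fd, ADD_NEW_DISK, &dinfo);
 *	// ... repeat for slot 1 ...
 *	ioctl(fd, RUN_ARRAY, 0);
 */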
void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
{
	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

	if (mddev->external_size)
		return;

	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(mddev_t *mddev, sector_t num_sectors)
{
	mdk_rdev_t *rdev;
	int rv;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;
	if (mddev->bitmap)
		/* Sorry, cannot grow a bitmap yet, just remove it,
		 * grow, and re-add.
		 */
		return -EBUSY;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv) {
		struct block_device *bdev;

		bdev = bdget_disk(mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)mddev->array_sectors << 9);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
	}
	return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
	int rv;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    raid_disks >= mddev->max_disks)
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;
	mddev->delta_disks = raid_disks - mddev->raid_disks;

	rv = mddev->pers->check_reshape(mddev);
	return rv;
}
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state,ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/*	    mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent||
	    mddev->chunk_sectors != info->chunk_size >> 9 ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->check_reshape == NULL)
			return -EINVAL;
		else {
			mddev->new_layout = info->layout;
			rv = mddev->pers->check_reshape(mddev);
			if (rv)
				mddev->new_layout = mddev->layout;
			return rv;
		}
	}
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks    != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->default_bitmap_offset == 0)
				return -EINVAL;
			mddev->bitmap_offset = mddev->default_bitmap_offset;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}
/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = get_capacity(mddev->gendisk) / 8;
	return 0;
}
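/*
 * With 2 heads and 4 sectors per track, a "cylinder" is 8 sectors, so
 * the division by 8 makes heads * sectors * cylinders equal the capacity
 * again.  E.g. a 1 TiB array (2^31 sectors of 512 bytes) is reported as
 * 2 * 4 * 268435456.
 */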
static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	mddev_t *mddev = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd)
	{
		case RAID_VERSION:
			err = get_version(argp);
			goto done;

		case PRINT_RAID_DEBUG:
			err = 0;
			md_print_devices();
			goto done;

#ifndef MODULE
		case RAID_AUTORUN:
			err = 0;
			autostart_arrays(arg);
			goto done;
#endif
		default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */
	mddev = bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto abort;
	}

	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
		       "md: ioctl lock interrupted, reason %d, cmd %d\n",
		       err, cmd);
		goto abort;
	}

	switch (cmd)
	{
		case SET_ARRAY_INFO:
			{
				mdu_array_info_t info;
				if (!arg)
					memset(&info, 0, sizeof(info));
				else if (copy_from_user(&info, argp, sizeof(info))) {
					err = -EFAULT;
					goto abort_unlock;
				}
				if (mddev->pers) {
					err = update_array_info(mddev, &info);
					if (err) {
						printk(KERN_WARNING "md: couldn't update"
						       " array info. %d\n", err);
						goto abort_unlock;
					}
					goto done_unlock;
				}
				if (!list_empty(&mddev->disks)) {
					printk(KERN_WARNING
					       "md: array %s already has disks!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				if (mddev->raid_disks) {
					printk(KERN_WARNING
					       "md: array %s already initialised!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				err = set_array_info(mddev, &info);
				if (err) {
					printk(KERN_WARNING "md: couldn't set"
					       " array info. %d\n", err);
					goto abort_unlock;
				}
			}
			goto done_unlock;

		default:;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd)
	{
		case GET_ARRAY_INFO:
			err = get_array_info(mddev, argp);
			goto done_unlock;

		case GET_BITMAP_FILE:
			err = get_bitmap_file(mddev, argp);
			goto done_unlock;

		case GET_DISK_INFO:
			err = get_disk_info(mddev, argp);
			goto done_unlock;

		case RESTART_ARRAY_RW:
			err = restart_array(mddev);
			goto done_unlock;

		case STOP_ARRAY:
			err = do_md_stop(mddev, 0, 1);
			goto done_unlock;

		case STOP_ARRAY_RO:
			err = do_md_stop(mddev, 1, 1);
			goto done_unlock;
	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
	 */
	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			sysfs_notify_dirent(mddev->sysfs_state);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		} else {
			err = -EROFS;
			goto abort_unlock;
		}
	}

	switch (cmd)
	{
		case ADD_NEW_DISK:
		{
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else
				err = add_new_disk(mddev, &info);
			goto done_unlock;
		}

		case HOT_REMOVE_DISK:
			err = hot_remove_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case HOT_ADD_DISK:
			err = hot_add_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case SET_DISK_FAULTY:
			err = set_disk_faulty(mddev, new_decode_dev(arg));
			goto done_unlock;

		case RUN_ARRAY:
			err = do_md_run(mddev);
			goto done_unlock;

		case SET_BITMAP_FILE:
			err = set_bitmap_file(mddev, (int)arg);
			goto done_unlock;

		default:
			err = -EINVAL;
			goto abort_unlock;
	}

done_unlock:
abort_unlock:
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
	mddev_unlock(mddev);

	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
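/*
 * Note the ordering above: RAID_VERSION/PRINT_RAID_DEBUG/RAID_AUTORUN
 * need no array at all, SET_ARRAY_INFO may create one, and everything
 * else runs with mddev_lock() held so it cannot race with another
 * configuration change.  The auto-read-only escape also means that e.g.
 *
 *	ioctl(fd, SET_DISK_FAULTY, (unsigned long)makedev(8, 1));
 *
 * (a hedged example; the device numbers are made up) will first switch
 * an auto-read-only array back to read-write before marking the member
 * faulty.
 */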
static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	mddev_t *mddev = mddev_find(bdev->bd_dev);
	int err;

	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_scheduled_work();
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
		goto out;

	err = 0;
	atomic_inc(&mddev->openers);
	mddev_unlock(mddev);

	check_disk_change(bdev);
 out:
	return err;
}
static int md_release(struct gendisk *disk, fmode_t mode)
{
	mddev_t *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);

	return 0;
}

static int md_media_changed(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}
static struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
	.getgeo		= md_getgeo,
	.media_changed	= md_media_changed,
	.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
	mdk_thread_t *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);

		thread->run(thread->mddev);
	}

	return 0;
}
void md_wakeup_thread(mdk_thread_t *thread)
{
	if (thread) {
		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
				 const char *name)
{
	mdk_thread_t *thread;

	thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}
void md_unregister_thread(mdk_thread_t *thread)
{
	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

	kthread_stop(thread->tsk);
	kfree(thread);
}
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (mddev->external)
		set_bit(Blocked, &rdev->flags);

	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		mdname(mddev),
		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));

	if (!mddev->pers)
		return;
	if (!mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev,rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(StateChanged, &rdev->flags);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event_inintr(mddev);
}
/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	mdk_rdev_t *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			      bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
	sector_t max_sectors, resync, res;
	unsigned long dt, db;
	sector_t rt;
	int scale;
	unsigned int per_milli;

	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	/*
	 * Should not happen.
	 */
	if (!max_sectors) {
		MD_BUG();
		return;
	}
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_sectors>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while ( max_sectors/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_sectors>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to the 'mark'.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;

	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}
struct mdstat_info {
	int event;
};

static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t sectors;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				"%lu%s chunk",
				bitmap->pages - bitmap->missing_pages,
				bitmap->pages,
				(bitmap->pages - bitmap->missing_pages)
					<< (PAGE_SHIFT - 10),
				chunk_kb ? chunk_kb : bitmap->chunksize,
				chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}
static const struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
static int is_mddev_idle(mddev_t *mddev, int init)
{
	mdk_rdev_t * rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
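/*
 * Numeric example of the heuristic above: if a member disk moved 10,000
 * sectors since the last check and 9,980 of them were resync I/O
 * (accounted in ->sync_io), curr_events advances by only 20, which stays
 * inside the 64-sector slack and the array still counts as idle.  A
 * process reading just 1,000 non-resync sectors pushes the delta past 64
 * and resync is throttled back towards the minimum speed.
 */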
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
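/*
 * Typical call pattern (see get_bitmap_file() above): flip the array to
 * 'active' before a blocking allocation, falling back to GFP_NOIO when
 * the superblock update could not complete under the lock:
 *
 *	if (md_allow_write(mddev))
 *		file = kmalloc(sizeof(*file), GFP_NOIO);
 *	else
 *		file = kmalloc(sizeof(*file), GFP_KERNEL);
 */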
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	mdk_rdev_t *rdev;
	char *desc;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->dev_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
	       " %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			blk_unplug(mddev->queue);
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed =
				mddev->curr_resync;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		if (j >= mddev->resync_max)
			wait_event(mddev->recovery_wait,
				   mddev->resync_max > j
				   || kthread_should_stop());

		if (kthread_should_stop())
			goto interrupted;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop())
			goto interrupted;


		/*
		 * this loop exits only if either when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	mddev->curr_resync = 0;
	mddev->curr_resync_completed = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
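/*
 * The throttle at the bottom of the main loop is a simple moving
 * average: if io_sectors is, say, 60,000 sectors ahead of
 * resync_mark_cnt over a ~3s mark window, currspeed = 60000/2 / (3+1)
 * + 1 = 7501 KB/sec.  That is compared against speed_min()/speed_max(),
 * which resolve to the per-array sysfs knobs or the global
 * speed_limit_{min,max} sysctl defaults.
 */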
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int spares = 0;

	mddev->curr_resync_completed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Blocked, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
		}
	}
	return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			remove_and_add_spares(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				if (mddev->persistent)
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (test_and_clear_bit(StateChanged, &rdev->flags))
				sysfs_notify_dirent(rdev->sysfs_state);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				if (mddev->pers->spare_active(mddev))
					sysfs_notify(&mddev->kobj, NULL,
						     "degraded");
			}
			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    mddev->pers->finish_reshape)
				mddev->pers->finish_reshape(mddev);
			md_update_sb(mddev, 1);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				list_for_each_entry(rdev, &mddev->disks, same_set)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify_dirent(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				do_md_stop(mddev, 1, 100);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	if (register_blkdev(MD_MAJOR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MD_MAJOR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
	       i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
6917 #endif /* !MODULE */
6919 static __exit
void md_exit(void)
6922 struct list_head
*tmp
;
6924 blk_unregister_region(MKDEV(MD_MAJOR
,0), 1U << MINORBITS
);
6925 blk_unregister_region(MKDEV(mdp_major
,0), 1U << MINORBITS
);
6927 unregister_blkdev(MD_MAJOR
,"md");
6928 unregister_blkdev(mdp_major
, "mdp");
6929 unregister_reboot_notifier(&md_notifier
);
6930 unregister_sysctl_table(raid_table_header
);
6931 remove_proc_entry("mdstat", NULL
);
6932 for_each_mddev(mddev
, tmp
) {
6933 export_array(mddev
);
6934 mddev
->hold_active
= 0;
6938 subsys_initcall(md_init
);
6939 module_exit(md_exit
)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);