/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * irq priorities.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
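/*
 * Illustrative usage (not part of md.c): from a root shell,
 *
 *	echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /proc/sys/dev/raid/speed_limit_max
 *
 * adjusts the system-wide bounds; the per-array sysfs files override
 * them, as speed_min()/speed_max() below show.
 */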
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
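/*
 * Illustrative usage (not part of md.c): md_print_devices() below walks
 * every array exactly this way, e.g.
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk("%s\n", mdname(mddev));
 *
 * A loop that breaks out early still holds a reference and must drop
 * it with mddev_put(mddev).
 */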
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	int rv;
	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();
	rv = mddev->pers->make_request(q, bio);
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}
static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	/* we now know that no code is executing in the personality module,
	 * except possibly the tail end of a ->bi_end_io function, but that
	 * is certain to complete before the module has a chance to get
	 * unloaded
	 */
}
static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, del_work);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}
static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	atomic_set(&new->active_io, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t num_sectors = rdev->sb_start;

	if (chunk_size)
		num_sectors &= ~((sector_t)chunk_size/512 - 1);
	return num_sectors;
}
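/*
 * Worked example (illustrative): MD_NEW_SIZE_SECTORS() masks the device
 * size down to a 64K (128-sector) boundary and then reserves the last
 * 64K for the superblock, so a 1000000-sector device yields
 * (1000000 & ~127) - 128 = 999936 - 128 = 999808 as the sb offset.
 */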
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}
static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
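/*
 * Illustrative only (not part of md.c): a hedged userspace sketch of
 * the same 0.90 checksum, handy when inspecting raw superblocks.  The
 * caller is assumed to have zeroed the in-buffer sb_csum field first,
 * exactly as calc_sb_csum() above does before summing.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint32_t sb90_csum(const uint32_t *sb32, size_t nwords /* MD_SB_BYTES/4 */)
{
	uint64_t newcsum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)	/* 64-bit accumulate, as above */
		newcsum += sb32[i];
	return (uint32_t)((newcsum & 0xffffffff) + (newcsum >> 32));
}
#endif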
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *      -EINVAL superblock incompatible or invalid
 *      -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
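/*
 * Illustrative only: callers dispatch through the super_types[] table
 * defined further below, e.g.
 *
 *	err = super_types[mddev->major_version].load_super(rdev, refdev,
 *				mddev->minor_version);
 *
 * so supporting a new on-disk format only means adding another entry.
 */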
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
		if (sb->level != 1 && sb->level != 4
		    && sb->level != 5 && sb->level != 6
		    && sb->level != 10) {
			/* FIXME use a better test */
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			goto abort;
		}
	}

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = calc_num_sectors(rdev, sb->chunk_size);

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk = sb->new_chunk;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0, failed=0, spare=0, nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
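/*
 * Worked example (illustrative): with max_dev == 384 the checksummed
 * region is 256 + 384*2 = 1024 bytes, i.e. 256 little-endian 32-bit
 * words summed into 64 bits and folded once back to 32.
 */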
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
		if (sb->level != cpu_to_le32(1) &&
		    sb->level != cpu_to_le32(4) &&
		    sb->level != cpu_to_le32(5) &&
		    sb->level != cpu_to_le32(6) &&
		    sb->level != cpu_to_le32(10)) {
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			return -EINVAL;
		}
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le32_to_cpu(sb->chunksize))
		rdev->sectors &= ~((sector_t)le32_to_cpu(sb->chunksize) - 1);

	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		if (mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
		if (rdev->recovery_offset > 0) {
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
			sb->recovery_offset =
				cpu_to_le64(rdev->recovery_offset);
		}
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev))
		sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct mdk_personality *pers = mddev->pers;
	struct gendisk *disk = mddev->gendisk;
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(disk);

	/* Data integrity passthrough not supported on RAID 4, 5 and 6 */
	if (pers && pers->level >= 4 && pers->level <= 6)
		return;

	/* If rdev is integrity capable, register profile for mddev */
	if (!bi_mddev && bi_rdev) {
		if (blk_integrity_register(disk, bi_rdev))
			printk(KERN_ERR "%s: %s Could not register integrity!\n",
			       __func__, disk->disk_name);
		else
			printk(KERN_NOTICE "Enabling data integrity on %s\n",
			       disk->disk_name);
		return;
	}

	/* Check that mddev and rdev have matching profiles */
	if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) {
		printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__,
		       disk->disk_name, rdev->bdev->bd_disk->disk_name);
		printk(KERN_NOTICE "Disabling data integrity on %s\n",
		       disk->disk_name);
		blk_integrity_unregister(disk);
	}
}
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	md_integrity_check(rdev, mddev);
	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
			":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
	       KERN_INFO "md:    Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       KERN_INFO "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
	       KERN_INFO "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       KERN_INFO "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		le32_to_cpu(sb->feature_map),
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form.  For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (!*cmd && !*str)
		return 1;
	return 0;
}
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	}
	if (!err && rdev->sysfs_state)
		sysfs_notify_dirent(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
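/*
 * Illustrative usage (not part of md.c): for a hypothetical member
 * /dev/sda1 of md0, the words accepted above can be written from a
 * root shell, e.g.
 *
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 *	echo remove > /sys/block/md0/md/dev-sda1/state
 *	cat /sys/block/md0/md/dev-sda1/state
 */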
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	char nm[20];
	int err;
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		mdk_rdev_t *rdev2;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify_dirent(rdev->sysfs_state);
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			printk(KERN_WARNING
			       "md: cannot register "
			       "%s for %s\n",
			       nm, mdname(rdev->mddev));

		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent(rdev->sysfs_state);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
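
/*
 * Illustrative sketch (not part of the driver): exercising the three
 * slot_store() cases above from userspace.  The directory path assumes the
 * usual /sys/block/mdX/md/dev-YYY sysfs layout.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static int set_slot(const char *dev_dir, const char *slot)
{
	char path[256];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "%s/slot", dev_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* "none" removes a failed/spare device on an active array;
	 * a number activates a spare (active array) or simply records
	 * the role (inactive array). */
	n = write(fd, slot, strlen(slot));
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif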
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->sectors && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (strict_strtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}
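
/*
 * A minimal standalone sketch (not part of the driver) of the same
 * overflow-safe blocks->sectors conversion, useful for seeing why both
 * checks above are needed when sector_t is only 32 bits wide.
 */
#if 0
#include <stdio.h>

typedef unsigned long my_sector_t;	/* 32-bit stand-in for the risky case */

static int blocks_to_sectors(unsigned long long blocks, my_sector_t *sectors)
{
	my_sector_t new;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -1;	/* blocks*2 would overflow unsigned long long */
	new = blocks * 2;	/* 1 block (1K) == 2 sectors (512 bytes) */
	if (new != blocks * 2)
		return -1;	/* value was truncated on assignment */
	*sectors = new;
	return 0;
}

int main(void)
{
	my_sector_t s;
	/* fails on a 32-bit my_sector_t, succeeds on a 64-bit one */
	printf("%d\n", blocks_to_sectors(1ULL << 40, &s));
	return 0;
}
#endif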
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	mddev_t *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (rdev->bdev->bd_inode->i_size >> 9) -
				rdev->data_offset;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->sectors, and if
		 * we have to change it back, we will have the lock again.
		 */
		mddev_t *mddev;
		int overlap = 0;
		struct list_head *tmp;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			mdk_rdev_t *rdev2;

			mddev_lock(mddev);
			list_for_each_entry(rdev2, &mddev->disks, same_set)
				if (test_bit(AllReserved, &rdev2->flags) ||
				    (rdev->bdev == rdev2->bdev &&
				     rdev != rdev2 &&
				     overlaps(rdev->data_offset, rdev->sectors,
					      rdev2->data_offset,
					      rdev2->sectors))) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
			       super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */
static void analyze_sbs(mddev_t * mddev)
{
	int i;
	mdk_rdev_t *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->desc_nr >= mddev->max_disks ||
		    i > mddev->max_disks) {
			printk(KERN_WARNING
			       "md: %s: %s: only %d devices permitted\n",
			       mdname(mddev), bdevname(rdev->bdev, b),
			       mddev->max_disks);
			kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= mddev->raid_disks) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
}
2610 safe_delay_show(mddev_t
*mddev
, char *page
)
2612 int msec
= (mddev
->safemode_delay
*1000)/HZ
;
2613 return sprintf(page
, "%d.%03d\n", msec
/1000, msec
%1000);
2616 safe_delay_store(mddev_t
*mddev
, const char *cbuf
, size_t len
)
2624 /* remove a period, and count digits after it */
2625 if (len
>= sizeof(buf
))
2627 strlcpy(buf
, cbuf
, sizeof(buf
));
2628 for (i
=0; i
<len
; i
++) {
2630 if (isdigit(buf
[i
])) {
2635 } else if (buf
[i
] == '.') {
2640 if (strict_strtoul(buf
, 10, &msec
) < 0)
2642 msec
= (msec
* 1000) / scale
;
2644 mddev
->safemode_delay
= 0;
2646 unsigned long old_delay
= mddev
->safemode_delay
;
2647 mddev
->safemode_delay
= (msec
*HZ
)/1000;
2648 if (mddev
->safemode_delay
== 0)
2649 mddev
->safemode_delay
= 1;
2650 if (mddev
->safemode_delay
< old_delay
)
2651 md_safemode_timeout((unsigned long)mddev
);
2655 static struct md_sysfs_entry md_safe_delay
=
2656 __ATTR(safe_mode_delay
, S_IRUGO
|S_IWUSR
,safe_delay_show
, safe_delay_store
);
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	char level[16];
	ssize_t rv = len;
	struct mdk_personality *pers;
	void *priv;

	if (mddev->pers == NULL) {
		if (len >= sizeof(mddev->clevel))
			return -ENOSPC;
		strncpy(mddev->clevel, buf, len);
		if (mddev->clevel[len-1] == '\n')
			len--;
		mddev->clevel[len] = 0;
		mddev->level = LEVEL_NONE;
		return rv;
	}

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will access other array.
	 */
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;

	if (!mddev->pers->quiesce) {
		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
		       mdname(mddev), mddev->pers->name);
		return -EINVAL;
	}

	/* Now find the new personality */
	if (len == 0 || len >= sizeof(level))
		return -EINVAL;
	strncpy(level, buf, len);
	if (level[len-1] == '\n')
		len--;
	level[len] = 0;

	request_module("md-%s", level);
	spin_lock(&pers_lock);
	pers = find_pers(LEVEL_NONE, level);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		printk(KERN_WARNING "md: personality %s not loaded\n", level);
		return -EINVAL;
	}
	spin_unlock(&pers_lock);

	if (pers == mddev->pers) {
		/* Nothing to do! */
		module_put(pers->owner);
		return rv;
	}
	if (!pers->takeover) {
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
		       mdname(mddev), level);
		return -EINVAL;
	}

	/* ->takeover must set new_* and/or delta_disks
	 * if it succeeds, and may set them when it fails.
	 */
	priv = pers->takeover(mddev);
	if (IS_ERR(priv)) {
		mddev->new_level = mddev->level;
		mddev->new_layout = mddev->layout;
		mddev->new_chunk = mddev->chunk_size;
		mddev->raid_disks -= mddev->delta_disks;
		mddev->delta_disks = 0;
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s would not accept array\n",
		       mdname(mddev), level);
		return PTR_ERR(priv);
	}

	/* Looks like we have a winner */
	mddev_suspend(mddev);
	mddev->pers->stop(mddev);
	module_put(mddev->pers->owner);
	mddev->pers = pers;
	mddev->private = priv;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_size = mddev->new_chunk;
	mddev->delta_disks = 0;
	mddev_resume(mddev);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->reconfig == NULL)
			return -EBUSY;
		err = mddev->pers->reconfig(mddev, n, -1);
		if (err)
			return err;
	} else {
		mddev->new_layout = n;
		if (mddev->reshape_position == MaxSector)
			mddev->layout = n;
	}
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_size != mddev->new_chunk)
		return sprintf(page, "%d (%d)\n", mddev->new_chunk,
			       mddev->chunk_size);
	return sprintf(page, "%d\n", mddev->chunk_size);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->reconfig == NULL)
			return -EBUSY;
		err = mddev->pers->reconfig(mddev, -1, n);
		if (err)
			return err;
	} else {
		mddev->new_chunk = n;
		if (mddev->reshape_position == MaxSector)
			mddev->chunk_size = n;
	}
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}
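
/*
 * Illustrative sketch (not part of the driver): reading array_state from
 * userspace.  The attribute is also pollable, fed by the
 * sysfs_notify_dirent(mddev->sysfs_state) calls throughout this file.
 * The sysfs path is an assumption based on the usual layout.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static int read_array_state(const char *md, char *buf, size_t len)
{
	char path[256];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/sys/block/%s/md/array_state", md);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, len - 1);
	close(fd);
	if (n < 0)
		return -1;
	buf[n] = '\0';		/* e.g. "clean\n" or "active\n" */
	return 0;
}
#endif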
static int do_md_stop(mddev_t * mddev, int ro, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);

static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->openers) > 0)
			return -EBUSY;
		err = do_md_stop(mddev, 0, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->openers) > 0)
				return -EBUSY;
			err = do_md_stop(mddev, 2, 0);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1, 0);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = do_md_stop(mddev, 1, 0);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					if (mddev->persistent)
						set_bit(MD_CHANGE_CLEAN,
							&mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else {
			mddev->ro = 0;
			mddev->recovery_cp = MaxSector;
			err = do_md_run(mddev);
		}
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			if (mddev->external)
				clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		sysfs_notify_dirent(mddev->sysfs_state);
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long chunk, end_chunk;
	char *end;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
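
/*
 * Illustrative sketch (not part of the driver): the input grammar accepted
 * by bitmap_store() above is whitespace-separated chunk numbers or
 * inclusive chunk ranges, e.g. "0 5 9" or "0-31 64-95".  A hypothetical
 * helper for building one token of such a write:
 */
#if 0
#include <stdio.h>

static int format_chunk_range(char *buf, size_t len,
			      unsigned long first, unsigned long last)
{
	if (first == last)
		return snprintf(buf, len, "%lu", first);
	return snprintf(buf, len, "%lu-%lu", first, last);
}
#endif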
static ssize_t
size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(mddev_t *mddev, sector_t num_sectors);

static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
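
/*
 * Illustrative note (not part of the driver): the three value forms
 * metadata_store() accepts.  The array path and the "imsm" type string
 * are assumptions used only as examples.
 */
#if 0
/* Writing to /sys/block/md0/md/metadata_version:
 *   "none"          -> no on-disk metadata  (persistent=0, external=0)
 *   "external:imsm" -> externally managed metadata, type string recorded
 *   "1.2"           -> internal metadata; major.minor must exist in
 *                      super_types[]
 */
static const char *metadata_examples[] = { "none", "external:imsm", "1.2" };
#endif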
static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent(mddev->sysfs_action);
	return len;
}
static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
	unsigned long resync, dt, db;
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
	unsigned long max_sectors, resync;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_size) {
		if (min & (sector_t)((mddev->chunk_size>>9)-1))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_size) {
			if (max & (sector_t)((mddev->chunk_size>>9)-1))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
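
/*
 * A minimal sketch (not part of the driver) of the chunk-alignment test
 * shared by min_sync_store() and max_sync_store() above: chunk_size is in
 * bytes, so chunk_size>>9 is sectors, and because md enforces power-of-two
 * chunk sizes the multiple-of-chunk test reduces to a simple mask.
 */
#if 0
#include <stdio.h>

static int is_chunk_aligned(unsigned long long sector, int chunk_size_bytes)
{
	unsigned long long chunk_sectors = chunk_size_bytes >> 9;

	/* valid only because chunk sizes are powers of two */
	return (sector & (chunk_sectors - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_chunk_aligned(128, 64*1024));	/* 1: 64k = 128 sectors */
	printf("%d\n", is_chunk_aligned(100, 64*1024));	/* 0 */
	return 0;
}
#endif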
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if (new >= mddev->suspend_hi ||
	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
		mddev->suspend_lo = new;
		mddev->pers->quiesce(mddev, 2);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
		mddev->suspend_hi = new;
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk = mddev->chunk_size;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
static ssize_t
array_size_show(mddev_t *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	if (mddev->pers) {
		struct block_device *bdev = bdget_disk(mddev->gendisk, 0);

		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)mddev->array_sectors << 9);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
	}

	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_array_size.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev_lock(mddev);
	if (mddev->hold_active == UNTIL_IOCTL)
		mddev->hold_active = 0;
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device
	 * to be completely removed (mddev_delayed_delete).
	 */
	flush_scheduled_work();

	mutex_lock(&disks_mutex);
	if (mddev->gendisk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return -EEXIST;
	}

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		mddev_t *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				return -EEXIST;
			}
		spin_unlock(&all_mddevs_lock);
	}

	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return -ENOMEM;
	}
	mddev->queue->queuedata = mddev;

	/* Can be unlocked because the queue is new: no concurrency */
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);

	blk_queue_make_request(mddev->queue, md_make_request);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		mutex_unlock(&disks_mutex);
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		mddev_put(mddev);
		return -ENOMEM;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	add_disk(disk);
	mddev->gendisk = disk;
	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	mutex_unlock(&disks_mutex);
	if (error)
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
	else {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return 0;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	md_alloc(dev, NULL);
	return NULL;
}
static int add_named_array(const char *val, struct kernel_param *kp)
{
	/* val must be "md_*" where * is not all digits.
	 * We allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) != 0)
		return -EINVAL;
	return md_alloc(0, buf);
}
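
/*
 * Illustrative sketch (not part of the driver): add_named_array() is a
 * kernel_param handler; in mainline it is typically wired to a writable
 * module parameter (assumed here to be named "new_array"), so creating a
 * named array amounts to writing "md_home" to
 * /sys/module/md_mod/parameters/new_array.  The path is an assumption.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static int create_named_array(const char *name)	/* must start with "md_" */
{
	int fd = open("/sys/module/md_mod/parameters/new_array", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, name, strlen(name));
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif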
static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;
static int do_md_run(mddev_t * mddev)
{
	int err;
	int chunk_size;
	mdk_rdev_t *rdev;
	struct gendisk *disk;
	struct mdk_personality *pers;
	char b[BDEVNAME_SIZE];

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	chunk_size = mddev->chunk_size;

	if (chunk_size) {
		if (chunk_size > MAX_CHUNK_SIZE) {
			printk(KERN_ERR "too big chunk_size: %d > %d\n",
				chunk_size, MAX_CHUNK_SIZE);
			return -EINVAL;
		}
		/*
		 * chunk-size has to be a power of 2
		 */
		if ( (1 << ffz(~chunk_size)) != chunk_size) {
			printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
			return -EINVAL;
		}

		/* devices must have minimum size of one chunk */
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (test_bit(Faulty, &rdev->flags))
				continue;
			if (rdev->sectors < chunk_size / 512) {
				printk(KERN_WARNING
					"md: Dev %s smaller than chunk_size:"
					" %llu < %d\n",
					bdevname(rdev->bdev,b),
					(unsigned long long)rdev->sectors,
					chunk_size / 512);
				return -EINVAL;
			}
		}
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->data_offset < rdev->sb_start) {
			if (mddev->dev_sectors &&
			    rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start) {
				printk("md: %s: data overlaps metadata\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
				       mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify_dirent(rdev->sysfs_state);
	}

	md_probe(mddev->unit, NULL, NULL);
	disk = mddev->gendisk;
	if (!disk)
		return -ENOMEM;

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	if (mddev->level != pers->level) {
		mddev->level = pers->level;
		mddev->new_level = pers->level;
	}
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (pers->level >= 4 && pers->level <= 6)
		/* Cannot support integrity (yet) */
		blk_integrity_unregister(mddev->gendisk);

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		mdk_rdev_t *rdev2;
		int warned = 0;

		list_for_each_entry(rdev, &mddev->disks, same_set)
			list_for_each_entry(rdev2, &mddev->disks, same_set) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}

		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	/* may be over-ridden by personality */
	mddev->resync_max_sectors = mddev->dev_sectors;

	mddev->barriers_work = 1;
	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (err)
		printk(KERN_ERR "md: pers->run() failed ...\n");
	else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
		WARN_ONCE(!mddev->external_size, "%s: default size too small,"
			  " but 'external_size' not in effect?\n", __func__);
		printk(KERN_ERR
		       "md: invalid array_size %llu > default size %llu\n",
		       (unsigned long long)mddev->array_sectors / 2,
		       (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
		err = -EINVAL;
		mddev->pers->stop(mddev);
	}
	if (err == 0 && mddev->pers->sync_request) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->pers->sync_request) {
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->writes_pending,0);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
				printk("md: cannot register %s for %s\n",
				       nm, mdname(mddev));
		}

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->flags)
		md_update_sb(mddev, 0);

	set_capacity(disk, mddev->array_sectors);

	/* If there is a partially-recovered drive we need to
	 * start recovery here.  If we leave it to md_check_recovery,
	 * it will remove the drives and not do the right thing
	 */
	if (mddev->degraded && !mddev->sync_thread) {
		int spares = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags))
				/* complete an interrupted recovery */
				spares++;
		if (spares && mddev->pers->sync_request) {
			mddev->recovery = 0;
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			}
		}
	}
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	md_new_event(mddev);
	sysfs_notify_dirent(mddev->sysfs_state);
	if (mddev->sysfs_action)
		sysfs_notify_dirent(mddev->sysfs_action);
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
	return 0;
}
static int restart_array(mddev_t *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
		mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent(mddev->sysfs_state);
	return 0;
}
/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

static void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
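
/*
 * A minimal sketch (not part of the driver) of the i_writecount convention
 * deny_bitmap_write_access() relies on: positive values count writers, and
 * -1 means "writes denied", the same scheme deny_write_access() uses for
 * executables being run.
 */
#if 0
static int writecount = 1;	/* md itself holds the one allowed reference */

static int try_deny(void)
{
	if (writecount > 1)
		return -1;	/* someone else has the file open for write */
	writecount = -1;	/* later attempts to open for write must fail */
	return 0;
}
#endif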
/* mode:
 *   0 - completely stop and dis-assemble array
 *   1 - switch to readonly
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
	int err = 0;
	struct gendisk *disk = mddev->gendisk;
	mdk_rdev_t *rdev;

	if (atomic_read(&mddev->openers) > is_open) {
		printk("md: %s still in use.\n",mdname(mddev));
		return -EBUSY;
	}

	if (mddev->pers) {

		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
		}

		del_timer_sync(&mddev->safemode_timer);

		switch(mode) {
		case 1: /* readonly */
			err  = -ENXIO;
			if (mddev->ro==1)
				goto out;
			mddev->ro = 1;
			break;
		case 0: /* disassemble */
		case 2: /* stop */
			bitmap_flush(mddev);
			md_super_wait(mddev);
			if (mddev->ro)
				set_disk_ro(disk, 0);

			mddev->pers->stop(mddev);
			mddev->queue->merge_bvec_fn = NULL;
			mddev->queue->unplug_fn = NULL;
			mddev->queue->backing_dev_info.congested_fn = NULL;
			if (mddev->pers->sync_request) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
			module_put(mddev->pers->owner);
			mddev->pers = NULL;
			/* tell userspace to handle 'inactive' */
			sysfs_notify_dirent(mddev->sysfs_state);

			set_capacity(disk, 0);

			if (mddev->ro)
				mddev->ro = 0;
		}
		if (!mddev->in_sync || mddev->flags) {
			/* mark array as shutdown cleanly */
			mddev->in_sync = 1;
			md_update_sb(mddev, 1);
		}
		if (mode == 1)
			set_disk_ro(disk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	}

	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {

		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
		}
		mddev->bitmap_offset = 0;

		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0) {
				char nm[20];
				sprintf(nm, "rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
			}

		/* make sure all md_delayed_delete calls have finished */
		flush_scheduled_work();

		export_array(mddev);

		mddev->array_sectors = 0;
		mddev->external_size = 0;
		mddev->dev_sectors = 0;
		mddev->raid_disks = 0;
		mddev->recovery_cp = 0;
		mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
		mddev->reshape_position = MaxSector;
		mddev->external = 0;
		mddev->persistent = 0;
		mddev->level = LEVEL_NONE;
		mddev->clevel[0] = 0;
		mddev->flags = 0;
		mddev->ro = 0;
		mddev->metadata_type[0] = 0;
		mddev->chunk_size = 0;
		mddev->ctime = mddev->utime = 0;
		mddev->layout = 0;
		mddev->max_disks = 0;
		mddev->events = 0;
		mddev->delta_disks = 0;
		mddev->new_level = LEVEL_NONE;
		mddev->new_layout = 0;
		mddev->new_chunk = 0;
		mddev->curr_resync = 0;
		mddev->resync_mismatches = 0;
		mddev->suspend_lo = mddev->suspend_hi = 0;
		mddev->sync_speed_min = mddev->sync_speed_max = 0;
		mddev->recovery = 0;
		mddev->in_sync = 0;
		mddev->degraded = 0;
		mddev->barriers_work = 0;
		mddev->safemode = 0;
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;

	} else if (mddev->pers)
		printk(KERN_INFO "md: %s switched to read-only mode.\n",
			mdname(mddev));
	err = 0;
	blk_integrity_unregister(disk);
	md_new_event(mddev);
	sysfs_notify_dirent(mddev->sysfs_state);
out:
	return err;
}
#ifndef MODULE
static void autorun_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run(mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, 0);
	}
}
/*
 * lets try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	mdk_rdev_t *rdev0, *rdev, *tmp;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
* mddev
, void __user
* arg
)
4552 mdu_array_info_t info
;
4553 int nr
,working
,active
,failed
,spare
;
4556 nr
=working
=active
=failed
=spare
=0;
4557 list_for_each_entry(rdev
, &mddev
->disks
, same_set
) {
4559 if (test_bit(Faulty
, &rdev
->flags
))
4563 if (test_bit(In_sync
, &rdev
->flags
))
4570 info
.major_version
= mddev
->major_version
;
4571 info
.minor_version
= mddev
->minor_version
;
4572 info
.patch_version
= MD_PATCHLEVEL_VERSION
;
4573 info
.ctime
= mddev
->ctime
;
4574 info
.level
= mddev
->level
;
4575 info
.size
= mddev
->dev_sectors
/ 2;
4576 if (info
.size
!= mddev
->dev_sectors
/ 2) /* overflow */
4579 info
.raid_disks
= mddev
->raid_disks
;
4580 info
.md_minor
= mddev
->md_minor
;
4581 info
.not_persistent
= !mddev
->persistent
;
4583 info
.utime
= mddev
->utime
;
4586 info
.state
= (1<<MD_SB_CLEAN
);
4587 if (mddev
->bitmap
&& mddev
->bitmap_offset
)
4588 info
.state
= (1<<MD_SB_BITMAP_PRESENT
);
4589 info
.active_disks
= active
;
4590 info
.working_disks
= working
;
4591 info
.failed_disks
= failed
;
4592 info
.spare_disks
= spare
;
4594 info
.layout
= mddev
->layout
;
4595 info
.chunk_size
= mddev
->chunk_size
;
4597 if (copy_to_user(arg
, &info
, sizeof(info
)))
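
/*
 * Illustrative userspace sketch (not part of the driver): fetching the
 * structure filled in by get_array_info() via the GET_ARRAY_INFO ioctl
 * declared in <linux/raid/md_u.h>.  The device path is an assumption.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
		perror("GET_ARRAY_INFO");
		return 1;
	}
	printf("level=%d raid_disks=%d active=%d failed=%d spare=%d\n",
	       info.level, info.raid_disks, info.active_disks,
	       info.failed_disks, info.spare_disks);
	close(fd);
	return 0;
}
#endif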
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	if (md_allow_write(mddev))
		file = kmalloc(sizeof(*file), GFP_NOIO);
	else
		file = kmalloc(sizeof(*file), GFP_KERNEL);

	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rdev = find_rdev_nr(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
							mdk_rdev_t, same_set);
			int err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b),
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
				"%s: personality does not support diskops!\n",
			       mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set save_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC)  &&
			    info->raid_disk < mddev->raid_disks)
				rdev->raid_disk = info->raid_disk;
			else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		rdev->saved_raid_disk = rdev->raid_disk;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent(rdev->sysfs_state);

		md_update_sb(mddev, 1);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
		rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size);

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
		bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}
static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	else
		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;

	rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size);

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */
	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}
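/*
 * Illustrative userspace sketch (not part of the driver): HOT_ADD_DISK
 * and HOT_REMOVE_DISK take the component's device number in 'arg'
 * (decoded above with new_decode_dev()), typically st_rdev from stat().
 * Paths and error handling are assumptions.
 */
#if 0
	struct stat st;

	if (stat("/dev/sdc1", &st) == 0 && S_ISBLK(st.st_mode)) {
		if (ioctl(md_fd, HOT_ADD_DISK, (unsigned long)st.st_rdev) < 0)
			perror("HOT_ADD_DISK");
	}
	/* ... and later, once the disk has been failed and is unused: */
	if (ioctl(md_fd, HOT_REMOVE_DISK, (unsigned long)st.st_rdev) < 0)
		perror("HOT_REMOVE_DISK");
#endif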
static int set_bitmap_file(mddev_t *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_file = fget(fd);

		if (mddev->bitmap_file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
			return err;
		}
		mddev->bitmap_offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0)
			err = bitmap_create(mddev);
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
		}
		mddev->bitmap_file = NULL;
	}

	return err;
}
/*
 * set_array_info is used two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout, chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{

	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->dev_sectors   = 2 * (sector_t)info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
	mddev->external	     = 0;

	mddev->layout        = info->layout;
	mddev->chunk_size    = info->chunk_size;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags         = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk = mddev->chunk_size;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
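/*
 * Illustrative userspace sketch (not part of the driver) of the older
 * creation path described above: raid_disks > 0 selects "create", and
 * the array gets a 0.90.0 superblock. The values are assumptions; each
 * component would then be added with ADD_NEW_DISK before RUN_ARRAY.
 */
#if 0
	mdu_array_info_t ainfo;

	memset(&ainfo, 0, sizeof(ainfo));
	ainfo.level = 1;		/* RAID-1 */
	ainfo.raid_disks = 2;
	ainfo.size = 0;			/* 0: let the personality derive it */
	ainfo.chunk_size = 64 * 1024;

	if (ioctl(md_fd, SET_ARRAY_INFO, &ainfo) < 0)
		perror("SET_ARRAY_INFO");
	/* ADD_NEW_DISK for each component, then RUN_ARRAY, would follow */
#endif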
void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
{
	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

	if (mddev->external_size)
		return;

	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(mddev_t *mddev, sector_t num_sectors)
{
	mdk_rdev_t *rdev;
	int rv;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;
	if (mddev->bitmap)
		/* Sorry, cannot grow a bitmap yet, just remove it,
		 * grow, and re-add.
		 */
		return -EBUSY;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv) {
		struct block_device *bdev;

		bdev = bdget_disk(mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)mddev->array_sectors << 9);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
	}
	return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
	int rv;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    raid_disks >= mddev->max_disks)
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;
	mddev->delta_disks = raid_disks - mddev->raid_disks;

	rv = mddev->pers->check_reshape(mddev);
	return rv;
}
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state, ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/*	    mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent||
	    mddev->chunk_size    != info->chunk_size    ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout;
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->reconfig == NULL)
			return -EINVAL;
		else
			return mddev->pers->reconfig(mddev, info->layout, -1);
	}
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks    != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->default_bitmap_offset == 0)
				return -EINVAL;
			mddev->bitmap_offset = mddev->default_bitmap_offset;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}
/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = get_capacity(mddev->gendisk) / 8;
	return 0;
}
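/*
 * Worked example of the fake geometry: heads=2 and sectors=4 make each
 * cylinder 8 sectors (4 KiB), so a 1 TiB array (2147483648 sectors)
 * reports 268435456 cylinders.
 */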
static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	mddev_t *mddev = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd)
	{
		case RAID_VERSION:
			err = get_version(argp);
			goto done;

		case PRINT_RAID_DEBUG:
			err = 0;
			md_print_devices();
			goto done;

#ifndef MODULE
		case RAID_AUTORUN:
			err = 0;
			autostart_arrays(arg);
			goto done;
#endif
		default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto abort;
	}

	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto abort;
	}

	switch (cmd)
	{
		case SET_ARRAY_INFO:
			{
				mdu_array_info_t info;
				if (!arg)
					memset(&info, 0, sizeof(info));
				else if (copy_from_user(&info, argp, sizeof(info))) {
					err = -EFAULT;
					goto abort_unlock;
				}
				if (mddev->pers) {
					err = update_array_info(mddev, &info);
					if (err) {
						printk(KERN_WARNING "md: couldn't update"
						       " array info. %d\n", err);
						goto abort_unlock;
					}
					goto done_unlock;
				}
				if (!list_empty(&mddev->disks)) {
					printk(KERN_WARNING
					       "md: array %s already has disks!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				if (mddev->raid_disks) {
					printk(KERN_WARNING
					       "md: array %s already initialised!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				err = set_array_info(mddev, &info);
				if (err) {
					printk(KERN_WARNING "md: couldn't set"
					       " array info. %d\n", err);
					goto abort_unlock;
				}
			}
			goto done_unlock;

		default:;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd)
	{
		case GET_ARRAY_INFO:
			err = get_array_info(mddev, argp);
			goto done_unlock;

		case GET_BITMAP_FILE:
			err = get_bitmap_file(mddev, argp);
			goto done_unlock;

		case GET_DISK_INFO:
			err = get_disk_info(mddev, argp);
			goto done_unlock;

		case RESTART_ARRAY_RW:
			err = restart_array(mddev);
			goto done_unlock;

		case STOP_ARRAY:
			err = do_md_stop(mddev, 0, 1);
			goto done_unlock;

		case STOP_ARRAY_RO:
			err = do_md_stop(mddev, 1, 1);
			goto done_unlock;

	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
	 */
	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			sysfs_notify_dirent(mddev->sysfs_state);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		} else {
			err = -EROFS;
			goto abort_unlock;
		}
	}

	switch (cmd)
	{
		case ADD_NEW_DISK:
		{
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else
				err = add_new_disk(mddev, &info);
			goto done_unlock;
		}

		case HOT_REMOVE_DISK:
			err = hot_remove_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case HOT_ADD_DISK:
			err = hot_add_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case SET_DISK_FAULTY:
			err = set_disk_faulty(mddev, new_decode_dev(arg));
			goto done_unlock;

		case RUN_ARRAY:
			err = do_md_run(mddev);
			goto done_unlock;

		case SET_BITMAP_FILE:
			err = set_bitmap_file(mddev, (int)arg);
			goto done_unlock;

		default:
			err = -EINVAL;
			goto abort_unlock;
	}

done_unlock:
abort_unlock:
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
	mddev_unlock(mddev);

	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	mddev_t *mddev = mddev_find(bdev->bd_dev);
	int err;

	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_scheduled_work();
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
		goto out;

	err = 0;
	atomic_inc(&mddev->openers);
	mddev_unlock(mddev);

	check_disk_change(bdev);
 out:
	return err;
}
static int md_release(struct gendisk *disk, fmode_t mode)
{
	mddev_t *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);

	return 0;
}

static int md_media_changed(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}
static struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.locked_ioctl	= md_ioctl,
	.getgeo		= md_getgeo,
	.media_changed	= md_media_changed,
	.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
	mdk_thread_t *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending.
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);

		thread->run(thread->mddev);
	}

	return 0;
}
void md_wakeup_thread(mdk_thread_t *thread)
{
	if (thread) {
		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
				 const char *name)
{
	mdk_thread_t *thread;

	thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}
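/*
 * Usage sketch (an assumption drawn from the raid personalities): a
 * personality's run() method typically creates its service thread as
 *
 *	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
 *
 * where the "%s" is expanded with mdname(mddev) by kthread_run() above,
 * giving task names like "md0_raid1".
 */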
void md_unregister_thread(mdk_thread_t *thread)
{
	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

	kthread_stop(thread->tsk);
	kfree(thread);
}
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (mddev->external)
		set_bit(Blocked, &rdev->flags);
/*
	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		mdname(mddev),
		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));
*/
	if (!mddev->pers)
		return;
	if (!mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev,rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(StateChanged, &rdev->flags);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event_inintr(mddev);
}
/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	mdk_rdev_t *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			      bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
	sector_t max_blocks, resync, res;
	unsigned long dt, db, rt;
	int scale;
	unsigned int per_milli;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors >> 1;
	else
		max_blocks = mddev->dev_sectors / 2;

	/*
	 * Should not happen.
	 */
	if (!max_blocks) {
		MD_BUG();
		return;
	}
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_blocks>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while ( max_blocks/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_blocks>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync,
		   (unsigned long long) max_blocks);

	/*
	 * We do not want to overflow, so the order of operands and
	 * the * 100 / 100 trick are important. We do a +1 to be
	 * safe against division by zero. We only estimate anyway.
	 *
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;
	rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;

	seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
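/*
 * Worked example of the estimate above: with dt = 30s and db = 60000
 * sectors moved since the mark (i.e. 1000 KB/sec), and 1000000 blocks
 * still to go, rt = (30 * (1000000 / (60000/2/100 + 1))) / 100 = 996s,
 * printed as "finish=16.6min speed=1000K/sec".
 */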
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;

}
static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

struct mdstat_info {
	int event;
};
static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t sectors;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				"%lu%s chunk",
				bitmap->pages - bitmap->missing_pages,
				bitmap->pages,
				(bitmap->pages - bitmap->missing_pages)
					<< (PAGE_SHIFT - 10),
				chunk_kb ? chunk_kb : bitmap->chunksize,
				chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}
static struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
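/*
 * Illustrative userspace sketch (not part of the driver): mdstat_poll()
 * flags POLLPRI|POLLERR whenever md_event_count has moved on, so a
 * monitor can sleep in poll() on /proc/mdstat instead of re-reading it.
 * Error handling is abbreviated.
 */
#if 0
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

void watch_mdstat(void)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	pfd.events = POLLPRI;
	while (pfd.fd >= 0 && poll(&pfd, 1, -1) > 0) {
		/* re-read from the start so the recorded event count
		 * is refreshed and the poll re-arms */
		lseek(pfd.fd, 0, SEEK_SET);
		read(pfd.fd, buf, sizeof(buf));
		/* ... react to the array state change here ... */
	}
}
#endif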
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
static int is_mddev_idle(mddev_t *mddev, int init)
{
	mdk_rdev_t * rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
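/*
 * Usage sketch of the rule above, mirroring get_bitmap_file() earlier in
 * this file: when md_allow_write() returns -EAGAIN the superblock write
 * is still pending, so allocate with GFP_NOIO to avoid recursing into
 * the block layer; otherwise GFP_KERNEL is safe. 'buf' and 'len' are
 * placeholders.
 */
#if 0
	if (md_allow_write(mddev))
		buf = kmalloc(len, GFP_NOIO);
	else
		buf = kmalloc(len, GFP_KERNEL);
#endif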
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	mdk_rdev_t *rdev;
	char *desc;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->dev_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
		" %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
		window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		if (j >= mddev->resync_max) {
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			wait_event(mddev->recovery_wait,
				   mddev->resync_max > j
				   || kthread_should_stop());
		}
		if (kthread_should_stop())
			goto interrupted;

		if (mddev->curr_resync > mddev->curr_resync_completed &&
		    (mddev->curr_resync - mddev->curr_resync_completed)
		    > (max_sectors >> 4)) {
			/* time to update curr_resync_completed */
			blk_unplug(mddev->queue);
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed =
				mddev->curr_resync;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		}
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						  currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop())
			goto interrupted;


		/*
		 * this loop exits only if either when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	mddev->curr_resync = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int spares = 0;

	mddev->curr_resync_completed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Blocked, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
		}
	}
	return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			remove_and_add_spares(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				if (mddev->persistent)
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (test_and_clear_bit(StateChanged, &rdev->flags))
				sysfs_notify_dirent(rdev->sysfs_state);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				if (mddev->pers->spare_active(mddev))
					sysfs_notify(&mddev->kobj, NULL,
						     "degraded");
			}
			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    mddev->pers->finish_reshape)
				mddev->pers->finish_reshape(mddev);
			md_update_sb(mddev, 1);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				list_for_each_entry(rdev, &mddev->disks, same_set)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n",
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify_dirent(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				do_md_stop(mddev, 1, 100);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	if (register_blkdev(MD_MAJOR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MD_MAJOR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
						i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
}

subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
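/*
 * Example (sketch): these parameters can be set at module load, e.g.
 * "modprobe md-mod start_ro=1", or later through
 * /sys/module/md_mod/parameters/{start_ro,start_dirty_degraded};
 * new_array is write-only and accepts names like "md_home".
 */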
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);