/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>

#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))

#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
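/*
 * Illustrative user-space sketch (not part of the driver): the limits
 * above can be adjusted through the procfs path named in the comment,
 * e.g. raising the guaranteed floor to 5 MB/sec:
 *
 *	FILE *f = fopen("/proc/sys/dev/raid/speed_limit_min", "w");
 *	if (f) {
 *		fprintf(f, "5000\n");
 *		fclose(f);
 *	}
 */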
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
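/*
 * Illustrative user-space sketch (not part of the driver) of the
 * notification scheme described above; md's /proc/mdstat poll handler
 * flags POLLERR|POLLPRI once the event count has moved on:
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *
 *	int main(void)
 *	{
 *		struct pollfd pfd;
 *		pfd.fd = open("/proc/mdstat", O_RDONLY);
 *		pfd.events = POLLPRI;
 *		poll(&pfd, 1, -1);	// blocks until the next md event
 *		return 0;
 *	}
 */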
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
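/*
 * Illustrative sketch of the refcount rule above: a caller that leaves
 * the loop early still owns a reference on the current mddev and must
 * drop it when done (target_unit is a hypothetical search key):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		if (mddev->unit == target_unit) {
 *			... use mddev ...
 *			mddev_put(mddev);
 *			break;
 *		}
 *	}
 */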
static int md_fail_request(struct request_queue *q, struct bio *bio)
{
	bio_io_error(bio);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		spin_unlock(&all_mddevs_lock);
		blk_cleanup_queue(mddev->queue);
		kobject_put(&mddev->kobj);
	} else
		spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}
	/* Can be unlocked because the queue is new: no concurrency */
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
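/*
 * Worked example (a sketch, assuming the usual 64KiB reservation behind
 * MD_NEW_SIZE_SECTORS): for a device of 1000000 512-byte sectors, the
 * size is rounded down to a 128-sector boundary and the last reserved
 * chunk holds the 0.90 superblock:
 *	(1000000 & ~127) - 128 = 999936 - 128 = 999808
 */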
static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t num_sectors = rdev->sb_start;

	if (chunk_size)
		num_sectors &= ~((sector_t)chunk_size/512 - 1);
	return num_sectors;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
	}
}

static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
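/*
 * Typical usage, as the rdev_size_change handlers below do: queue the
 * superblock write, then drain all pending writes before proceeding:
 *
 *	md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	md_super_wait(mddev);
 */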
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
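/*
 * Worked example: md_csum_fold(0x12345678)
 *	first fold:  0x5678 + 0x1234 = 0x68ac
 *	second fold: 0x68ac + 0x0000 = 0x68ac
 * The second fold only matters when the first one carries, e.g.
 * 0xffff8000 folds to 0x17fff and then to 0x8000.
 */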
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
		if (sb->level != 1 && sb->level != 4
		    && sb->level != 5 && sb->level != 6
		    && sb->level != 10) {
			/* FIXME use a better test */
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			goto abort;
		}
	}

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_num_sectors(rdev, sb->chunk_size) / 2;

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk = sb->new_chunk;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, tmp, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->size * 2)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
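	/*
	 * Worked example for minor_version 0 below: on a 1000000-sector
	 * device, sb_start = (1000000 - 16) & ~7 = 999984, i.e. 8K from
	 * the end of the device, aligned down to 4K.
	 */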
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
		if (sb->level != cpu_to_le32(1) &&
		    sb->level != cpu_to_le32(4) &&
		    sb->level != cpu_to_le32(5) &&
		    sb->level != cpu_to_le32(6) &&
		    sb->level != cpu_to_le32(10)) {
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			return -EINVAL;
		}
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_start / 2;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le64_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->size<<1);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags) &&
	    rdev->recovery_offset > 0) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
	}

	max_dev = 0;
	rdev_for_each(rdev2, tmp, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev))
		sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, tmp, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->size * 2)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->size * 2 + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
* rdev
, mddev_t
* mddev
)
1405 char b
[BDEVNAME_SIZE
];
1415 /* prevent duplicates */
1416 if (find_rdev(mddev
, rdev
->bdev
->bd_dev
))
1419 /* make sure rdev->size exceeds mddev->size */
1420 if (rdev
->size
&& (mddev
->size
== 0 || rdev
->size
< mddev
->size
)) {
1422 /* Cannot change size, so fail
1423 * If mddev->level <= 0, then we don't care
1424 * about aligning sizes (e.g. linear)
1426 if (mddev
->level
> 0)
1429 mddev
->size
= rdev
->size
;
1432 /* Verify rdev->desc_nr is unique.
1433 * If it is -1, assign a free number, else
1434 * check number is not in use
1436 if (rdev
->desc_nr
< 0) {
1438 if (mddev
->pers
) choice
= mddev
->raid_disks
;
1439 while (find_rdev_nr(mddev
, choice
))
1441 rdev
->desc_nr
= choice
;
1443 if (find_rdev_nr(mddev
, rdev
->desc_nr
))
1446 bdevname(rdev
->bdev
,b
);
1447 while ( (s
=strchr(b
, '/')) != NULL
)
1450 rdev
->mddev
= mddev
;
1451 printk(KERN_INFO
"md: bind<%s>\n", b
);
1453 if ((err
= kobject_add(&rdev
->kobj
, &mddev
->kobj
, "dev-%s", b
)))
1456 ko
= &part_to_dev(rdev
->bdev
->bd_part
)->kobj
;
1457 if ((err
= sysfs_create_link(&rdev
->kobj
, ko
, "block"))) {
1458 kobject_del(&rdev
->kobj
);
1461 list_add_rcu(&rdev
->same_set
, &mddev
->disks
);
1462 bd_claim_by_disk(rdev
->bdev
, rdev
->bdev
->bd_holder
, mddev
->gendisk
);
1466 printk(KERN_WARNING
"md: failed to register dev-%s for %s\n",
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");

	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state". We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}
static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}
static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}

static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

static void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		rdev_for_each(rdev, tmp2, mddev)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		rdev_for_each(rdev, tmp2, mddev)
			print_rdev(rdev);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, tmp, mddev) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
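/*
 * Illustrative only:
 *	cmd_match("remove\n", "remove") == 1
 *	cmd_match("remove",   "remove") == 1
 *	cmd_match("removed",  "remove") == 0
 */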
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	}
	if (!err)
		sysfs_notify(&rdev->kobj, NULL, "state");
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
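/*
 * Illustrative user-space sketch (the device names are assumptions):
 * the keywords above are driven by writing to sysfs, e.g. failing a
 * member of a hypothetical md0 array:
 *
 *	int fd = open("/sys/block/md0/md/dev-sda1/state", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "faulty\n", 7);
 *		close(fd);
 *	}
 */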
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);

static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}
static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	char nm[20];
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		mdk_rdev_t *rdev2;
		struct list_head *tmp;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		rdev_for_each(rdev2, tmp, rdev->mddev)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify(&rdev->kobj, NULL, "state");
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			printk(KERN_WARNING
			       "md: cannot register "
			       "%s for %s\n",
			       nm, mdname(rdev->mddev));
		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify(&rdev->kobj, NULL, "state");
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->size && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);

static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
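/*
 * Illustrative only:
 *	overlaps(0, 100,  50, 100) == 1    [0,100) and [50,150) intersect
 *	overlaps(0, 100, 100, 100) == 0    [0,100) and [100,200) only touch
 */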
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	unsigned long long size;
	unsigned long long oldsize = rdev->size;
	mddev_t *my_mddev = rdev->mddev;

	if (strict_strtoull(buf, 10, &size) < 0)
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			size = super_types[my_mddev->major_version].
				rdev_size_change(rdev, size * 2);
			if (!size)
				return -EBUSY;
		} else if (!size) {
			size = (rdev->bdev->bd_inode->i_size >> 10);
			size -= rdev->data_offset/2;
		}
	}
	if (size < my_mddev->size)
		return -EINVAL; /* component must fit device */

	rdev->size = size;
	if (size > oldsize && my_mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->size, and if
		 * we have to change it back, we will have the lock again.
		 */
		mddev_t *mddev;
		int overlap = 0;
		struct list_head *tmp, *tmp2;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			mdk_rdev_t *rdev2;

			mddev_lock(mddev);
			rdev_for_each(rdev2, tmp2, mddev)
				if (test_bit(AllReserved, &rdev2->flags) ||
				    (rdev->bdev == rdev2->bdev &&
				     rdev != rdev2 &&
				     overlaps(rdev->data_offset, rdev->size * 2,
					      rdev2->data_offset,
					      rdev2->size * 2))) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsize back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->size = oldsize;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};

static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
			       super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */
static void analyze_sbs(mddev_t * mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= mddev->raid_disks) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
}
static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale = 1;
	int dot = 0;
	int i;
	unsigned long msec;
	char buf[30];
	char *e;

	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, len);
	buf[len] = 0;
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot = 1;
			buf[i] = 0;
		}
	}
	msec = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
		if (mddev->safemode_delay < old_delay)
			md_safemode_timeout((unsigned long)mddev);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
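/*
 * Illustrative usage sketch (not part of the driver): safe_mode_delay
 * accepts a decimal number of seconds, which the store routine above
 * scales to jiffies. The device name and value below are hypothetical.
 *
 *	echo 0.200 > /sys/block/md0/md/safe_mode_delay	(200 msec)
 *	cat /sys/block/md0/md/safe_mode_delay		(prints "0.200")
 */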
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	ssize_t rv = len;
	if (mddev->pers)
		return -EBUSY;
	if (len == 0)
		return 0;
	if (len >= sizeof(mddev->clevel))
		return -ENOSPC;
	strncpy(mddev->clevel, buf, len);
	if (mddev->clevel[len-1] == '\n')
		len--;
	mddev->clevel[len] = 0;
	mddev->level = LEVEL_NONE;
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	if (mddev->reshape_position != MaxSector)
		mddev->new_layout = n;
	else
		mddev->layout = n;
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_size != mddev->new_chunk)
		return sprintf(page, "%d (%d)\n", mddev->new_chunk,
			       mddev->chunk_size);
	return sprintf(page, "%d\n", mddev->chunk_size);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set chunk_size if array is not yet active */
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	else if (mddev->reshape_position != MaxSector)
		mddev->new_chunk = n;
	else
		mddev->chunk_size = n;
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
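/*
 * Illustrative usage sketch (not part of the driver): the enum values
 * above map one-to-one onto the strings accepted and reported by the
 * array_state attribute. The device name is hypothetical.
 *
 *	cat /sys/block/md0/md/array_state		(e.g. "clean")
 *	echo readonly > /sys/block/md0/md/array_state
 *	echo clear    > /sys/block/md0/md/array_state	(like STOP_ARRAY)
 */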
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->size == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(mddev_t * mddev, int ro, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->openers) > 0)
			return -EBUSY;
		err = do_md_stop(mddev, 0, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->openers) > 0)
				return -EBUSY;
			err = do_md_stop(mddev, 2, 0);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1, 0);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = do_md_stop(mddev, 1, 0);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					if (mddev->persistent)
						set_bit(MD_CHANGE_CLEAN,
							&mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else {
			mddev->ro = 0;
			mddev->recovery_cp = MaxSector;
			err = do_md_run(mddev);
		}
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			if (mddev->external)
				clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		sysfs_notify(&mddev->kobj, NULL, "array_state");
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
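/*
 * Illustrative usage sketch (not part of the driver): new_dev takes the
 * "major:minor" of the component device. 8:16 (often /dev/sdb) and the
 * md device name are hypothetical.
 *
 *	echo 8:16 > /sys/block/md0/md/new_dev
 */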
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
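/*
 * Illustrative usage sketch (not part of the driver): chunks may be
 * given singly or as ranges, matching the parser above. The chunk
 * numbers and device name are hypothetical.
 *
 *	echo "100 200-205" > /sys/block/md0/md/bitmap_set_bits
 */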
static ssize_t
size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
}

static int update_size(mddev_t *mddev, sector_t num_sectors);

static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	char *e;
	int err = 0;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	if (!*buf || *buf == '\n' ||
	    (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		err = update_size(mddev, size * 2);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->size == 0 ||
		    mddev->size > size)
			mddev->size = size;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
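/*
 * Illustrative usage sketch (not part of the driver); only valid while
 * no devices are attached (or for external metadata). The device name
 * and metadata-type string are hypothetical.
 *
 *	echo 1.2           > /sys/block/md0/md/metadata_version
 *	echo external:imsm > /sys/block/md0/md/metadata_version
 *	echo none          > /sys/block/md0/md/metadata_version
 */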
static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
	return len;
}
static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
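/*
 * Illustrative usage sketch (not part of the driver): the words accepted
 * by action_store above. The device name is hypothetical.
 *
 *	echo check  > /sys/block/md0/md/sync_action	(read-only scrub)
 *	echo repair > /sys/block/md0/md/sync_action	(scrub and fix)
 *	echo idle   > /sys/block/md0/md/sync_action	(interrupt it)
 *	cat /sys/block/md0/md/mismatch_cnt
 */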
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
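/*
 * Illustrative usage sketch (not part of the driver): a per-array value
 * overrides the /proc/sys/dev/raid global; writing "system" reverts to
 * it. The values and device name are hypothetical.
 *
 *	echo 5000   > /sys/block/md0/md/sync_speed_min	(KB/sec, local)
 *	echo system > /sys/block/md0/md/sync_speed_min	(back to global)
 */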
static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
	unsigned long resync, dt, db;
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
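/*
 * Worked example for the arithmetic above (hypothetical numbers):
 * if db = 409600 sectors were handled since the last mark over
 * dt = 100 seconds, the attribute reports 409600/100/2 = 2048 K/sec,
 * the final /2 converting 512-byte sectors to kilobytes.
 */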
static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
	unsigned long max_blocks, resync;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors;
	else
		max_blocks = mddev->size << 1;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
	return sprintf(page, "%lu / %lu\n", resync, max_blocks);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_size) {
		if (min & (sector_t)((mddev->chunk_size>>9)-1))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_size) {
			if (max & (sector_t)((mddev->chunk_size>>9)-1))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if (new >= mddev->suspend_hi ||
	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
		mddev->suspend_lo = new;
		mddev->pers->quiesce(mddev, 2);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
		mddev->suspend_hi = new;
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk = mddev->chunk_size;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	return rv;
}
static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}
static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);
	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	static DEFINE_MUTEX(disks_mutex);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned = (MAJOR(dev) != MD_MAJOR);
	int shift = partitioned ? MdpMinorShift : 0;
	int unit = MINOR(dev) >> shift;
	int error;

	if (!mddev)
		return NULL;

	mutex_lock(&disks_mutex);
	if (mddev->gendisk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk = alloc_disk(1 << shift);
	if (!disk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk->major = MAJOR(dev);
	disk->first_minor = unit << shift;
	if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	add_disk(disk);
	mddev->gendisk = disk;
	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	mutex_unlock(&disks_mutex);
	if (error)
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
	else
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
	return NULL;
}
static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags);
	}
	md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;

static int do_md_run(mddev_t * mddev)
{
	int err;
	int chunk_size;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	struct gendisk *disk;
	struct mdk_personality *pers;
	char b[BDEVNAME_SIZE];

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	chunk_size = mddev->chunk_size;

	if (chunk_size) {
		if (chunk_size > MAX_CHUNK_SIZE) {
			printk(KERN_ERR "too big chunk_size: %d > %d\n",
				chunk_size, MAX_CHUNK_SIZE);
			return -EINVAL;
		}
		/*
		 * chunk-size has to be a power of 2
		 */
		if ( (1 << ffz(~chunk_size)) != chunk_size) {
			printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
			return -EINVAL;
		}

		/* devices must have minimum size of one chunk */
		rdev_for_each(rdev, tmp, mddev) {
			if (test_bit(Faulty, &rdev->flags))
				continue;
			if (rdev->size < chunk_size / 1024) {
				printk(KERN_WARNING
					"md: Dev %s smaller than chunk_size:"
					" %lluk < %dk\n",
					bdevname(rdev->bdev,b),
					(unsigned long long)rdev->size,
					chunk_size / 1024);
				return -EINVAL;
			}
		}
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, tmp, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->data_offset < rdev->sb_start) {
			if (mddev->size &&
			    rdev->data_offset + mddev->size*2
			    > rdev->sb_start) {
				printk("md: %s: data overlaps metadata\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
				       mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify(&rdev->kobj, NULL, "state");
	}

	md_probe(mddev->unit, NULL, NULL);
	disk = mddev->gendisk;
	if (!disk)
		return -ENOMEM;

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	mddev->level = pers->level;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		mdk_rdev_t *rdev2;
		struct list_head *tmp2;
		int warned = 0;
		rdev_for_each(rdev, tmp, mddev) {
			rdev_for_each(rdev2, tmp2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}
		}
		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
	mddev->barriers_work = 1;
	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (err)
		printk(KERN_ERR "md: pers->run() failed ...\n");
	else if (mddev->pers->sync_request) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->pers->sync_request) {
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->writes_pending,0);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;

	rdev_for_each(rdev, tmp, mddev)
		if (rdev->raid_disk >= 0) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
				printk("md: cannot register %s for %s\n",
				       nm, mdname(mddev));
		}

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->flags)
		md_update_sb(mddev, 0);

	set_capacity(disk, mddev->array_sectors);

	/* If we call blk_queue_make_request here, it will
	 * re-initialise max_sectors etc which may have been
	 * refined inside -> run.  So just set the bits we need to set.
	 * Most initialisation happended when we called
	 * blk_queue_make_request(..., md_fail_request)
	 * earlier.
	 */
	mddev->queue->queuedata = mddev;
	mddev->queue->make_request_fn = mddev->pers->make_request;

	/* If there is a partially-recovered drive we need to
	 * start recovery here.  If we leave it to md_check_recovery,
	 * it will remove the drives and not do the right thing
	 */
	if (mddev->degraded && !mddev->sync_thread) {
		struct list_head *rtmp;
		int spares = 0;
		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags))
				/* complete an interrupted recovery */
				spares++;
		if (spares && mddev->pers->sync_request) {
			mddev->recovery = 0;
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			}
		}
	}
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	mddev->changed = 1;
	md_new_event(mddev);
	sysfs_notify(&mddev->kobj, NULL, "array_state");
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
	return 0;
}
static int restart_array(mddev_t *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
		mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify(&mddev->kobj, NULL, "array_state");
	return 0;
}
/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

static void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
/* mode:
 *   0 - completely stop and dis-assemble array
 *   1 - switch to readonly
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
	int err = 0;
	struct gendisk *disk = mddev->gendisk;

	if (atomic_read(&mddev->openers) > is_open) {
		printk("md: %s still in use.\n",mdname(mddev));
		return -EBUSY;
	}

	if (mddev->pers) {

		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
		}

		del_timer_sync(&mddev->safemode_timer);

		switch(mode) {
		case 1: /* readonly */
			err = -ENXIO;
			if (mddev->ro == 1)
				goto out;
			mddev->ro = 1;
			break;
		case 0: /* disassemble */
		case 2: /* stop */
			bitmap_flush(mddev);
			md_super_wait(mddev);
			if (mddev->ro)
				set_disk_ro(disk, 0);
			blk_queue_make_request(mddev->queue, md_fail_request);
			mddev->pers->stop(mddev);
			mddev->queue->merge_bvec_fn = NULL;
			mddev->queue->unplug_fn = NULL;
			mddev->queue->backing_dev_info.congested_fn = NULL;
			if (mddev->pers->sync_request)
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);

			module_put(mddev->pers->owner);
			mddev->pers = NULL;
			/* tell userspace to handle 'inactive' */
			sysfs_notify(&mddev->kobj, NULL, "array_state");

			set_capacity(disk, 0);
			mddev->changed = 1;

			if (mddev->ro)
				mddev->ro = 0;
		}
		if (!mddev->in_sync || mddev->flags) {
			/* mark array as shutdown cleanly */
			mddev->in_sync = 1;
			md_update_sb(mddev, 1);
		}
		if (mode == 1)
			set_disk_ro(disk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	}

	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		mdk_rdev_t *rdev;
		struct list_head *tmp;

		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
		}
		mddev->bitmap_offset = 0;

		rdev_for_each(rdev, tmp, mddev)
			if (rdev->raid_disk >= 0) {
				char nm[20];
				sprintf(nm, "rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
			}

		/* make sure all md_delayed_delete calls have finished */
		flush_scheduled_work();

		export_array(mddev);

		mddev->array_sectors = 0;
		mddev->size = 0;
		mddev->raid_disks = 0;
		mddev->recovery_cp = 0;
		mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
		mddev->reshape_position = MaxSector;
		mddev->external = 0;
		mddev->persistent = 0;
		mddev->level = LEVEL_NONE;
		mddev->clevel[0] = 0;
		mddev->flags = 0;
		mddev->ro = 0;
		mddev->metadata_type[0] = 0;
		mddev->chunk_size = 0;
		mddev->ctime = mddev->utime = 0;
		mddev->layout = 0;
		mddev->max_disks = 0;
		mddev->events = 0;
		mddev->delta_disks = 0;
		mddev->new_level = LEVEL_NONE;
		mddev->new_layout = 0;
		mddev->new_chunk = 0;
		mddev->curr_resync = 0;
		mddev->resync_mismatches = 0;
		mddev->suspend_lo = mddev->suspend_hi = 0;
		mddev->sync_speed_min = mddev->sync_speed_max = 0;
		mddev->recovery = 0;
		mddev->in_sync = 0;
		mddev->changed = 0;
		mddev->degraded = 0;
		mddev->barriers_work = 0;
		mddev->safemode = 0;

	} else if (mddev->pers)
		printk(KERN_INFO "md: %s switched to read-only mode.\n",
			mdname(mddev));
	err = 0;
	md_new_event(mddev);
	sysfs_notify(&mddev->kobj, NULL, "array_state");
out:
	return err;
}
static void autorun_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	rdev_for_each(rdev, tmp, mddev) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run(mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, 0);
	}
}

/*
 * lets try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev0, *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
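/*
 * Illustrative userspace sketch (not part of the driver) of driving the
 * ioctl above; any open md device node will do, "/dev/md0" is
 * hypothetical.
 *
 *	mdu_version_t ver;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
 *		printf("md driver %d.%d.%d\n",
 *		       ver.major, ver.minor, ver.patchlevel);
 */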
static int get_array_info(mddev_t * mddev, void __user * arg)
{
	mdu_array_info_t info;
	int nr,working,active,failed,spare;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	nr=working=active=failed=spare=0;
	rdev_for_each(rdev, tmp, mddev) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				active++;
			else
				spare++;
		}
	}

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->size;
	if (info.size != mddev->size) /* overflow */
		info.size = -1;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_offset)
		info.state = (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = active;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_size;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	if (md_allow_write(mddev))
		file = kmalloc(sizeof(*file), GFP_NOIO);
	else
		file = kmalloc(sizeof(*file), GFP_KERNEL);

	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rdev = find_rdev_nr(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
							mdk_rdev_t, same_set);
			int err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b),
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
				"%s: personality does not support diskops!\n",
				mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set save_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC)  &&
			    info->raid_disk < mddev->raid_disks)
				rdev->raid_disk = info->raid_disk;
			else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		rdev->saved_raid_disk = rdev->raid_disk;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks are for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify(&rdev->kobj, NULL, "state");

		md_update_sb(mddev, 1);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
		rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
		bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}
static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	else
		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;

	rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */

	if (rdev->desc_nr == mddev->max_disks) {
		printk(KERN_WARNING "%s: can not hot-add to full array!\n",
			mdname(mddev));
		err = -EBUSY;
		goto abort_unbind_export;
	}

	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_unbind_export:
	unbind_rdev_from_array(rdev);

abort_export:
	export_rdev(rdev);
	return err;
}
static int set_bitmap_file(mddev_t *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_file = fget(fd);

		if (mddev->bitmap_file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
			return err;
		}
		mddev->bitmap_offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0)
			err = bitmap_create(mddev);
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
		}
		mddev->bitmap_file = NULL;
	}

	return err;
}
/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent,layout,chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  use to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept incase the
 *  super_block handler wishes to interpret them.
 */
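/*
 * Illustrative userspace sketch (not part of the driver) of the second,
 * "assemble" usage described above: raid_disks == 0 and only the version
 * fields matter. The field values and file descriptor are hypothetical.
 *
 *	mdu_array_info_t info;
 *	memset(&info, 0, sizeof(info));
 *	info.major_version = 0;		(v0.90 superblocks)
 *	info.minor_version = 90;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */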
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{

	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->size          = info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * openned
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
	mddev->external      = 0;

	mddev->layout        = info->layout;
	mddev->chunk_size    = info->chunk_size;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk = mddev->chunk_size;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
static int update_size(mddev_t *mddev, sector_t num_sectors)
{
	mdk_rdev_t * rdev;
	int rv;
	struct list_head *tmp;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;
	if (mddev->bitmap)
		/* Sorry, cannot grow a bitmap yet, just remove it,
		 * grow, and re-add.
		 */
		return -EBUSY;
	rdev_for_each(rdev, tmp, mddev) {
		sector_t avail;
		avail = rdev->size * 2;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv) {
		struct block_device *bdev;

		bdev = bdget_disk(mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)mddev->array_sectors << 9);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
	}
	return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
	int rv;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    raid_disks >= mddev->max_disks)
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;
	mddev->delta_disks = raid_disks - mddev->raid_disks;

	rv = mddev->pers->check_reshape(mddev);
	return rv;
}
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state,ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/*	    mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent||
	    mddev->chunk_size    != info->chunk_size    ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->size != info->size) cnt++;
	if (mddev->raid_disks != info->raid_disks) cnt++;
	if (mddev->layout != info->layout) cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
	if (cnt == 0) return 0;
	if (cnt > 1) return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->reconfig == NULL)
			return -EINVAL;
		else
			return mddev->pers->reconfig(mddev, info->layout, -1);
	}
	if (info->size >= 0 && mddev->size != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks    != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->default_bitmap_offset == 0)
				return -EINVAL;
			mddev->bitmap_offset = mddev->default_bitmap_offset;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}
/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = get_capacity(mddev->gendisk) / 8;
	return 0;
}
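/*
 * Worked example for the fake geometry above (hypothetical capacity):
 * a 1954816-sector array reports heads = 2, sectors = 4, so
 * cylinders = 1954816 / 8 = 244352; heads * sectors * cylinders
 * reproduces the real capacity.
 */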
static int md_ioctl(struct inode *inode, struct file *file,
            unsigned int cmd, unsigned long arg)
{
    int err = 0;
    void __user *argp = (void __user *)arg;
    mddev_t *mddev = NULL;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    /*
     * Commands dealing with the RAID driver but not any
     * particular array:
     */
    switch (cmd)
    {
        case RAID_VERSION:
            err = get_version(argp);
            goto done;

        case PRINT_RAID_DEBUG:
            err = 0;
            md_print_devices();
            goto done;

#ifndef MODULE
        case RAID_AUTORUN:
            err = 0;
            autostart_arrays(arg);
            goto done;
#endif
        default:;
    }

    /*
     * Commands creating/starting a new array:
     */

    mddev = inode->i_bdev->bd_disk->private_data;

    if (!mddev) {
        BUG();
        goto abort;
    }

    err = mddev_lock(mddev);
    if (err) {
        printk(KERN_INFO
            "md: ioctl lock interrupted, reason %d, cmd %d\n",
            err, cmd);
        goto abort;
    }

    switch (cmd)
    {
        case SET_ARRAY_INFO:
            {
                mdu_array_info_t info;
                if (!arg)
                    memset(&info, 0, sizeof(info));
                else if (copy_from_user(&info, argp, sizeof(info))) {
                    err = -EFAULT;
                    goto abort_unlock;
                }
                if (mddev->pers) {
                    err = update_array_info(mddev, &info);
                    if (err) {
                        printk(KERN_WARNING "md: couldn't update"
                               " array info. %d\n", err);
                        goto abort_unlock;
                    }
                    goto done_unlock;
                }
                if (!list_empty(&mddev->disks)) {
                    printk(KERN_WARNING
                           "md: array %s already has disks!\n",
                           mdname(mddev));
                    err = -EBUSY;
                    goto abort_unlock;
                }
                if (mddev->raid_disks) {
                    printk(KERN_WARNING
                           "md: array %s already initialised!\n",
                           mdname(mddev));
                    err = -EBUSY;
                    goto abort_unlock;
                }
                err = set_array_info(mddev, &info);
                if (err) {
                    printk(KERN_WARNING "md: couldn't set"
                           " array info. %d\n", err);
                    goto abort_unlock;
                }
            }
            goto done_unlock;

        default:;
    }

    /*
     * Commands querying/configuring an existing array:
     */
    /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
     * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
    if ((!mddev->raid_disks && !mddev->external)
        && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
        && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
        && cmd != GET_BITMAP_FILE) {
        err = -ENODEV;
        goto abort_unlock;
    }

    /*
     * Commands even a read-only array can execute:
     */
    switch (cmd)
    {
        case GET_ARRAY_INFO:
            err = get_array_info(mddev, argp);
            goto done_unlock;

        case GET_BITMAP_FILE:
            err = get_bitmap_file(mddev, argp);
            goto done_unlock;

        case GET_DISK_INFO:
            err = get_disk_info(mddev, argp);
            goto done_unlock;

        case RESTART_ARRAY_RW:
            err = restart_array(mddev);
            goto done_unlock;

        case STOP_ARRAY:
            err = do_md_stop(mddev, 0, 1);
            goto done_unlock;

        case STOP_ARRAY_RO:
            err = do_md_stop(mddev, 1, 1);
            goto done_unlock;

    }

    /*
     * The remaining ioctls are changing the state of the
     * superblock, so we do not allow them on read-only arrays.
     * However non-MD ioctls (e.g. get-size) will still come through
     * here and hit the 'default' below, so only disallow
     * 'md' ioctls, and switch to rw mode if started auto-readonly.
     */
    if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
        if (mddev->ro == 2) {
            mddev->ro = 0;
            sysfs_notify(&mddev->kobj, NULL, "array_state");
            set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            md_wakeup_thread(mddev->thread);
        } else {
            err = -EROFS;
            goto abort_unlock;
        }
    }

    switch (cmd)
    {
        case ADD_NEW_DISK:
        {
            mdu_disk_info_t info;
            if (copy_from_user(&info, argp, sizeof(info)))
                err = -EFAULT;
            else
                err = add_new_disk(mddev, &info);
            goto done_unlock;
        }

        case HOT_REMOVE_DISK:
            err = hot_remove_disk(mddev, new_decode_dev(arg));
            goto done_unlock;

        case HOT_ADD_DISK:
            err = hot_add_disk(mddev, new_decode_dev(arg));
            goto done_unlock;

        case SET_DISK_FAULTY:
            err = set_disk_faulty(mddev, new_decode_dev(arg));
            goto done_unlock;

        case RUN_ARRAY:
            err = do_md_run(mddev);
            goto done_unlock;

        case SET_BITMAP_FILE:
            err = set_bitmap_file(mddev, (int)arg);
            goto done_unlock;

        default:
            err = -EINVAL;
            goto abort_unlock;
    }

done_unlock:
abort_unlock:
    mddev_unlock(mddev);

    return err;
done:
    if (err)
        MD_BUG();
abort:
    return err;
}
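/*
 * Illustrative sketch (not part of the driver): a minimal userspace caller
 * of the read-only ioctls handled above.  GET_ARRAY_INFO requires only
 * CAP_SYS_ADMIN and works even on read-only arrays; the device path is an
 * assumption for the example.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
    mdu_array_info_t info;
    int fd = open("/dev/md0", O_RDONLY);  /* hypothetical array */

    if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) != 0) {
        perror("GET_ARRAY_INFO");
        return 1;
    }
    printf("level %d, raid_disks %d, active %d, failed %d, spare %d\n",
           info.level, info.raid_disks, info.active_disks,
           info.failed_disks, info.spare_disks);
    return 0;
}
#endif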
static int md_open(struct inode *inode, struct file *file)
{
    /*
     * Succeed if we can lock the mddev, which confirms that
     * it isn't being stopped right now.
     */
    mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
    int err;

    if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
        goto out;

    err = 0;
    mddev_get(mddev);
    atomic_inc(&mddev->openers);
    mddev_unlock(mddev);

    check_disk_change(inode->i_bdev);
 out:
    return err;
}
static int md_release(struct inode *inode, struct file * file)
{
    mddev_t *mddev = inode->i_bdev->bd_disk->private_data;

    BUG_ON(!mddev);
    atomic_dec(&mddev->openers);
    mddev_put(mddev);

    return 0;
}
static int md_media_changed(struct gendisk *disk)
{
    mddev_t *mddev = disk->private_data;

    return mddev->changed;
}
static int md_revalidate(struct gendisk *disk)
{
    mddev_t *mddev = disk->private_data;

    mddev->changed = 0;
    return 0;
}
static struct block_device_operations md_fops =
{
    .owner          = THIS_MODULE,
    .open           = md_open,
    .release        = md_release,
    .ioctl          = md_ioctl,
    .getgeo         = md_getgeo,
    .media_changed  = md_media_changed,
    .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
    mdk_thread_t *thread = arg;

    /*
     * md_thread is a 'system-thread', its priority should be very
     * high. We avoid resource deadlocks individually in each
     * raid personality. (RAID5 does preallocation) We also use RR and
     * the very same RT priority as kswapd, thus we will never get
     * into a priority inversion deadlock.
     *
     * we definitely have to have equal or higher priority than
     * bdflush, otherwise bdflush will deadlock if there are too
     * many dirty RAID5 blocks.
     */

    allow_signal(SIGKILL);
    while (!kthread_should_stop()) {

        /* We need to wait INTERRUPTIBLE so that
         * we don't add to the load-average.
         * That means we need to be sure no signals are
         * pending
         */
        if (signal_pending(current))
            flush_signals(current);

        wait_event_interruptible_timeout
            (thread->wqueue,
             test_bit(THREAD_WAKEUP, &thread->flags)
             || kthread_should_stop(),
             thread->timeout);

        clear_bit(THREAD_WAKEUP, &thread->flags);

        thread->run(thread->mddev);
    }

    return 0;
}
void md_wakeup_thread(mdk_thread_t *thread)
{
    if (thread) {
        dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
        set_bit(THREAD_WAKEUP, &thread->flags);
        wake_up(&thread->wqueue);
    }
}
mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
                 const char *name)
{
    mdk_thread_t *thread;

    thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
    if (!thread)
        return NULL;

    init_waitqueue_head(&thread->wqueue);

    thread->run = run;
    thread->mddev = mddev;
    thread->timeout = MAX_SCHEDULE_TIMEOUT;
    thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
    if (IS_ERR(thread->tsk)) {
        kfree(thread);
        return NULL;
    }
    return thread;
}
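/*
 * Illustrative sketch (not part of the driver): the registration pattern a
 * personality uses with the helpers above.  A per-array service thread is
 * created once at run() time and poked with md_wakeup_thread() whenever
 * there is work.  Every "example_*" name is an assumption, not md API.
 */
#if 0
static void example_daemon(mddev_t *mddev)
{
    /* runs whenever someone calls md_wakeup_thread(mddev->thread) */
}

static int example_start(mddev_t *mddev)
{
    mddev->thread = md_register_thread(example_daemon, mddev,
                                       "%s_example");
    if (!mddev->thread)
        return -ENOMEM;               /* kthread_run() failed */
    md_wakeup_thread(mddev->thread);  /* kick the first pass */
    return 0;
}
#endif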
void md_unregister_thread(mdk_thread_t *thread)
{
    dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

    kthread_stop(thread->tsk);
    kfree(thread);
}
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
    if (!mddev) {
        MD_BUG();
        return;
    }

    if (!rdev || test_bit(Faulty, &rdev->flags))
        return;

    if (mddev->external)
        set_bit(Blocked, &rdev->flags);

    dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
        mdname(mddev),
        MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
        __builtin_return_address(0),__builtin_return_address(1),
        __builtin_return_address(2),__builtin_return_address(3));

    if (!mddev->pers)
        return;
    if (!mddev->pers->error_handler)
        return;
    mddev->pers->error_handler(mddev,rdev);
    if (mddev->degraded)
        set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
    set_bit(StateChanged, &rdev->flags);
    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
    set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    md_wakeup_thread(mddev->thread);
    md_new_event_inintr(mddev);
}
/* seq_file implementation for /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
    int i = 0;
    mdk_rdev_t *rdev;
    struct list_head *tmp;

    seq_printf(seq, "unused devices: ");

    rdev_for_each_list(rdev, tmp, pending_raid_disks) {
        char b[BDEVNAME_SIZE];
        i++;
        seq_printf(seq, "%s ",
                   bdevname(rdev->bdev,b));
    }
    if (!i)
        seq_printf(seq, "<none>");

    seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
    sector_t max_blocks, resync, res;
    unsigned long dt, db, rt;
    int scale;
    unsigned int per_milli;

    resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;

    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
        max_blocks = mddev->resync_max_sectors >> 1;
    else
        max_blocks = mddev->size;

    /*
     * Should not happen.
     */
    if (!max_blocks) {
        MD_BUG();
        return;
    }
    /* Pick 'scale' such that (resync>>scale)*1000 will fit
     * in a sector_t, and (max_blocks>>scale) will fit in a
     * u32, as those are the requirements for sector_div.
     * Thus 'scale' must be at least 10
     */
    scale = 10;
    if (sizeof(sector_t) > sizeof(unsigned long)) {
        while ( max_blocks/2 > (1ULL<<(scale+32)))
            scale++;
    }
    res = (resync>>scale)*1000;
    sector_div(res, (u32)((max_blocks>>scale)+1));

    per_milli = res;
    {
        int i, x = per_milli/50, y = 20-x;
        seq_printf(seq, "[");
        for (i = 0; i < x; i++)
            seq_printf(seq, "=");
        seq_printf(seq, ">");
        for (i = 0; i < y; i++)
            seq_printf(seq, ".");
        seq_printf(seq, "] ");
    }
    seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
               (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
                "reshape" :
                (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
                 "check" :
                 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
                  "resync" : "recovery"))),
               per_milli/10, per_milli % 10,
               (unsigned long long) resync,
               (unsigned long long) max_blocks);

    /*
     * We do not want to overflow, so the order of operands and
     * the * 100 / 100 trick are important. We do a +1 to be
     * safe against division by zero. We only estimate anyway.
     *
     * dt: time from mark until now
     * db: blocks written from mark until now
     * rt: remaining time
     */
    dt = ((jiffies - mddev->resync_mark) / HZ);
    if (!dt) dt++;
    db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
        - mddev->resync_mark_cnt;
    rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;

    seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

    seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
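/*
 * Worked example for the estimate above (illustrative numbers): if the
 * current mark is 60 seconds old (dt = 60), 1200000 sectors completed since
 * that mark (db = 1200000), and 1000000 blocks remain (max_blocks - resync),
 * then db/2/100+1 = 6001, so
 *   rt = (60 * (1000000 / 6001)) / 100 = 99 seconds,
 * which prints as "finish=1.6min" (rt/60 = 1, (99%60)/6 = 6).
 */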
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct list_head *tmp;
    loff_t l = *pos;
    mddev_t *mddev;

    if (l >= 0x10000)
        return NULL;
    if (!l--)
        /* header */
        return (void*)1;

    spin_lock(&all_mddevs_lock);
    list_for_each(tmp,&all_mddevs)
        if (!l--) {
            mddev = list_entry(tmp, mddev_t, all_mddevs);
            mddev_get(mddev);
            spin_unlock(&all_mddevs_lock);
            return mddev;
        }
    spin_unlock(&all_mddevs_lock);
    if (!l--)
        return (void*)2;/* tail */
    return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct list_head *tmp;
    mddev_t *next_mddev, *mddev = v;

    ++*pos;
    if (v == (void*)2)
        return NULL;

    spin_lock(&all_mddevs_lock);
    if (v == (void*)1)
        tmp = all_mddevs.next;
    else
        tmp = mddev->all_mddevs.next;
    if (tmp != &all_mddevs)
        next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
    else {
        next_mddev = (void*)2;
        *pos = 0x10000;
    }
    spin_unlock(&all_mddevs_lock);

    if (v != (void*)1)
        mddev_put(mddev);
    return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
    mddev_t *mddev = v;

    if (mddev && v != (void*)1 && v != (void*)2)
        mddev_put(mddev);
}
struct mdstat_info {
    int event;
};
static int md_seq_show(struct seq_file *seq, void *v)
{
    mddev_t *mddev = v;
    sector_t size;
    struct list_head *tmp2;
    mdk_rdev_t *rdev;
    struct mdstat_info *mi = seq->private;
    struct bitmap *bitmap;

    if (v == (void*)1) {
        struct mdk_personality *pers;
        seq_printf(seq, "Personalities : ");
        spin_lock(&pers_lock);
        list_for_each_entry(pers, &pers_list, list)
            seq_printf(seq, "[%s] ", pers->name);

        spin_unlock(&pers_lock);
        seq_printf(seq, "\n");
        mi->event = atomic_read(&md_event_count);
        return 0;
    }
    if (v == (void*)2) {
        status_unused(seq);
        return 0;
    }

    if (mddev_lock(mddev) < 0)
        return -EINTR;

    if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
        seq_printf(seq, "%s : %sactive", mdname(mddev),
                   mddev->pers ? "" : "in");
        if (mddev->pers) {
            if (mddev->ro==1)
                seq_printf(seq, " (read-only)");
            if (mddev->ro==2)
                seq_printf(seq, " (auto-read-only)");
            seq_printf(seq, " %s", mddev->pers->name);
        }

        size = 0;
        rdev_for_each(rdev, tmp2, mddev) {
            char b[BDEVNAME_SIZE];
            seq_printf(seq, " %s[%d]",
                bdevname(rdev->bdev,b), rdev->desc_nr);
            if (test_bit(WriteMostly, &rdev->flags))
                seq_printf(seq, "(W)");
            if (test_bit(Faulty, &rdev->flags)) {
                seq_printf(seq, "(F)");
                continue;
            } else if (rdev->raid_disk < 0)
                seq_printf(seq, "(S)"); /* spare */
            size += rdev->size;
        }

        if (!list_empty(&mddev->disks)) {
            if (mddev->pers)
                seq_printf(seq, "\n      %llu blocks",
                           (unsigned long long)
                           mddev->array_sectors / 2);
            else
                seq_printf(seq, "\n      %llu blocks",
                           (unsigned long long)size);
        }
        if (mddev->persistent) {
            if (mddev->major_version != 0 ||
                mddev->minor_version != 90) {
                seq_printf(seq," super %d.%d",
                           mddev->major_version,
                           mddev->minor_version);
            }
        } else if (mddev->external)
            seq_printf(seq, " super external:%s",
                       mddev->metadata_type);
        else
            seq_printf(seq, " super non-persistent");

        if (mddev->pers) {
            mddev->pers->status(seq, mddev);
            seq_printf(seq, "\n      ");
            if (mddev->pers->sync_request) {
                if (mddev->curr_resync > 2) {
                    status_resync(seq, mddev);
                    seq_printf(seq, "\n      ");
                } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
                    seq_printf(seq, "\tresync=DELAYED\n      ");
                else if (mddev->recovery_cp < MaxSector)
                    seq_printf(seq, "\tresync=PENDING\n      ");
            }
        } else
            seq_printf(seq, "\n       ");

        if ((bitmap = mddev->bitmap)) {
            unsigned long chunk_kb;
            unsigned long flags;
            spin_lock_irqsave(&bitmap->lock, flags);
            chunk_kb = bitmap->chunksize >> 10;
            seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
                "%lu%s chunk",
                bitmap->pages - bitmap->missing_pages,
                bitmap->pages,
                (bitmap->pages - bitmap->missing_pages)
                    << (PAGE_SHIFT - 10),
                chunk_kb ? chunk_kb : bitmap->chunksize,
                chunk_kb ? "KB" : "B");
            if (bitmap->file) {
                seq_printf(seq, ", file: ");
                seq_path(seq, &bitmap->file->f_path, " \t\n");
            }

            seq_printf(seq, "\n");
            spin_unlock_irqrestore(&bitmap->lock, flags);
        }

        seq_printf(seq, "\n");

    }
    mddev_unlock(mddev);

    return 0;
}
static struct seq_operations md_seq_ops = {
    .start  = md_seq_start,
    .next   = md_seq_next,
    .stop   = md_seq_stop,
    .show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
    int error;
    struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
    if (mi == NULL)
        return -ENOMEM;

    error = seq_open(file, &md_seq_ops);
    if (error)
        kfree(mi);
    else {
        struct seq_file *p = file->private_data;
        p->private = mi;
        mi->event = atomic_read(&md_event_count);
    }
    return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
    struct seq_file *m = filp->private_data;
    struct mdstat_info *mi = m->private;
    int mask;

    poll_wait(filp, &md_event_waiters, wait);

    /* always allow read */
    mask = POLLIN | POLLRDNORM;

    if (mi->event != atomic_read(&md_event_count))
        mask |= POLLERR | POLLPRI;
    return mask;
}
static const struct file_operations md_seq_fops = {
    .owner      = THIS_MODULE,
    .open       = md_seq_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = seq_release_private,
    .poll       = mdstat_poll,
};
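/*
 * Illustrative sketch (not part of the driver): how userspace consumes the
 * POLLPRI semantics implemented by mdstat_poll() above.  Monitoring tools
 * work roughly this way: poll an open /proc/mdstat fd and re-read it from
 * the top whenever an exceptional event is signalled.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    struct pollfd pfd = { .events = POLLPRI | POLLERR };

    pfd.fd = open("/proc/mdstat", O_RDONLY);
    while (pfd.fd >= 0 && poll(&pfd, 1, -1) > 0) {
        lseek(pfd.fd, 0, SEEK_SET);       /* re-read from the top */
        read(pfd.fd, buf, sizeof(buf));   /* fresh array status */
        /* ... parse and react to the event ... */
    }
    return 0;
}
#endif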
int register_md_personality(struct mdk_personality *p)
{
    spin_lock(&pers_lock);
    list_add_tail(&p->list, &pers_list);
    printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
    spin_unlock(&pers_lock);
    return 0;
}
int unregister_md_personality(struct mdk_personality *p)
{
    printk(KERN_INFO "md: %s personality unregistered\n", p->name);
    spin_lock(&pers_lock);
    list_del_init(&p->list);
    spin_unlock(&pers_lock);
    return 0;
}
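/*
 * Illustrative sketch (not part of the driver): the minimum a personality
 * module does with the registration hooks above.  Real personalities
 * (raid1.c, raid5.c, ...) fill in many more methods; every "example_*"
 * name and the level number are assumptions for the sketch.
 */
#if 0
static struct mdk_personality example_personality = {
    .name   = "example",
    .level  = 99,          /* hypothetical, unclaimed level number */
    .owner  = THIS_MODULE,
    /* .make_request, .run, .stop, .status, ... would go here */
};

static int __init example_init(void)
{
    return register_md_personality(&example_personality);
}

static void __exit example_exit(void)
{
    unregister_md_personality(&example_personality);
}
#endif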
static int is_mddev_idle(mddev_t *mddev)
{
    mdk_rdev_t * rdev;
    int idle;
    long curr_events;

    idle = 1;
    rcu_read_lock();
    rdev_for_each_rcu(rdev, mddev) {
        struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
        curr_events = part_stat_read(&disk->part0, sectors[0]) +
                part_stat_read(&disk->part0, sectors[1]) -
                atomic_read(&disk->sync_io);
        /* sync IO will cause sync_io to increase before the disk_stats
         * as sync_io is counted when a request starts, and
         * disk_stats is counted when it completes.
         * So resync activity will cause curr_events to be smaller than
         * when there was no such activity.
         * non-sync IO will cause disk_stat to increase without
         * increasing sync_io so curr_events will (eventually)
         * be larger than it was before.  Once it becomes
         * substantially larger, the test below will cause
         * the array to appear non-idle, and resync will slow
         * down.
         * If there is a lot of outstanding resync activity when
         * we set last_event to curr_events, then all that activity
         * completing might cause the array to appear non-idle
         * and resync will be slowed down even though there might
         * not have been non-resync activity.  This will only
         * happen once though.  'last_events' will soon reflect
         * the state where there is little or no outstanding
         * resync requests, and further resync activity will
         * always make curr_events less than last_events.
         */
        if (curr_events - rdev->last_events > 4096) {
            rdev->last_events = curr_events;
            idle = 0;
        }
    }
    rcu_read_unlock();
    return idle;
}
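/*
 * Numeric illustration of the test above: if a member disk shows
 * curr_events = 130000 while rdev->last_events is still 120000, the
 * difference (10000 > 4096 sectors) marks the array busy, so the resync
 * loop throttles back to speed_min; differences within the 4096-sector
 * slack are attributed to resync's own I/O and the array stays "idle".
 */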
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
    /* another "blocks" (512byte) blocks have been synced */
    atomic_sub(blocks, &mddev->recovery_active);
    wake_up(&mddev->recovery_wait);
    if (!ok) {
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        // stop recovery, signal do_sync ....
    }
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
    int did_change = 0;
    if (bio_data_dir(bi) != WRITE)
        return;

    BUG_ON(mddev->ro == 1);
    if (mddev->ro == 2) {
        /* need to switch to read/write */
        mddev->ro = 0;
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread);
        did_change = 1;
    }
    atomic_inc(&mddev->writes_pending);
    if (mddev->safemode == 1)
        mddev->safemode = 0;
    if (mddev->in_sync) {
        spin_lock_irq(&mddev->write_lock);
        if (mddev->in_sync) {
            mddev->in_sync = 0;
            set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            md_wakeup_thread(mddev->thread);
            did_change = 1;
        }
        spin_unlock_irq(&mddev->write_lock);
    }
    if (did_change)
        sysfs_notify(&mddev->kobj, NULL, "array_state");
    wait_event(mddev->sb_wait,
               !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
               !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
    if (atomic_dec_and_test(&mddev->writes_pending)) {
        if (mddev->safemode == 2)
            md_wakeup_thread(mddev->thread);
        else if (mddev->safemode_delay)
            mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
    }
}
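/*
 * Illustrative sketch (not part of the driver): how a personality brackets
 * a write with the two helpers above, so the superblock is marked 'active'
 * before user data lands and safemode can re-mark it clean once the last
 * write drains.  example_make_request is an assumption for the sketch.
 */
#if 0
static int example_make_request(struct request_queue *q, struct bio *bio)
{
    mddev_t *mddev = q->queuedata;

    md_write_start(mddev, bio);  /* may block on a superblock update */
    /* ... map and submit the bio to member disks ... */
    /* the personality calls md_write_end(mddev) once the write
     * completes, typically from its bio end_io handler */
    return 0;
}
#endif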
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
    if (!mddev->pers)
        return 0;
    if (mddev->ro)
        return 0;
    if (!mddev->pers->sync_request)
        return 0;

    spin_lock_irq(&mddev->write_lock);
    if (mddev->in_sync) {
        mddev->in_sync = 0;
        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
        if (mddev->safemode_delay &&
            mddev->safemode == 0)
            mddev->safemode = 1;
        spin_unlock_irq(&mddev->write_lock);
        md_update_sb(mddev, 0);
        sysfs_notify(&mddev->kobj, NULL, "array_state");
    } else
        spin_unlock_irq(&mddev->write_lock);

    if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
        return -EAGAIN;
    else
        return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
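/*
 * Illustrative sketch (not part of the driver): the intended call pattern
 * described in the comment above, before a GFP_KERNEL allocation made
 * under mddev_lock.  example_reconfig is an assumption for the sketch.
 */
#if 0
static int example_reconfig(mddev_t *mddev)
{
    void *buf;
    int err = md_allow_write(mddev);

    if (err)    /* -EAGAIN: external metadata, userspace was notified */
        return err;
    buf = kmalloc(4096, GFP_KERNEL);  /* safe: array marked active */
    if (!buf)
        return -ENOMEM;
    /* ... */
    kfree(buf);
    return 0;
}
#endif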
#define SYNC_MARKS      10
#define SYNC_MARK_STEP  (3*HZ)
void md_do_sync(mddev_t *mddev)
{
    mddev_t *mddev2;
    unsigned int currspeed = 0,
         window;
    sector_t max_sectors,j, io_sectors;
    unsigned long mark[SYNC_MARKS];
    sector_t mark_cnt[SYNC_MARKS];
    int last_mark,m;
    struct list_head *tmp;
    sector_t last_check;
    int skipped = 0;
    struct list_head *rtmp;
    mdk_rdev_t *rdev;
    char *desc;

    /* just in case thread restarts... */
    if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
        return;
    if (mddev->ro) /* never try to sync a read-only array */
        return;

    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
            desc = "data-check";
        else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
            desc = "requested-resync";
        else
            desc = "resync";
    } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        desc = "reshape";
    else
        desc = "recovery";

    /* we overload curr_resync somewhat here.
     * 0 == not engaged in resync at all
     * 2 == checking that there is no conflict with another sync
     * 1 == like 2, but have yielded to allow conflicting resync to
     *      commence
     * other == active in resync - this many blocks
     *
     * Before starting a resync we must have set curr_resync to
     * 2, and then checked that every "conflicting" array has curr_resync
     * less than ours.  When we find one that is the same or higher
     * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
     * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
     * This will mean we have to start checking from the beginning again.
     */

    do {
        mddev->curr_resync = 2;

    try_again:
        if (kthread_should_stop()) {
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            goto skip;
        }
        for_each_mddev(mddev2, tmp) {
            if (mddev2 == mddev)
                continue;
            if (!mddev->parallel_resync
            &&  mddev2->curr_resync
            &&  match_mddev_units(mddev, mddev2)) {
                DEFINE_WAIT(wq);
                if (mddev < mddev2 && mddev->curr_resync == 2) {
                    /* arbitrarily yield */
                    mddev->curr_resync = 1;
                    wake_up(&resync_wait);
                }
                if (mddev > mddev2 && mddev->curr_resync == 1)
                    /* no need to wait here, we can wait the next
                     * time 'round when curr_resync == 2
                     */
                    continue;
                /* We need to wait 'interruptible' so as not to
                 * contribute to the load average, and not to
                 * be caught by 'softlockup'
                 */
                prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                if (!kthread_should_stop() &&
                    mddev2->curr_resync >= mddev->curr_resync) {
                    printk(KERN_INFO "md: delaying %s of %s"
                           " until %s has finished (they"
                           " share one or more physical units)\n",
                           desc, mdname(mddev), mdname(mddev2));
                    mddev_put(mddev2);
                    if (signal_pending(current))
                        flush_signals(current);
                    schedule();
                    finish_wait(&resync_wait, &wq);
                    goto try_again;
                }
                finish_wait(&resync_wait, &wq);
            }
        }
    } while (mddev->curr_resync < 2);

    j = 0;
    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        /* resync follows the size requested by the personality,
         * which defaults to physical size, but can be virtual size
         */
        max_sectors = mddev->resync_max_sectors;
        mddev->resync_mismatches = 0;
        /* we don't use the checkpoint if there's a bitmap */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
            j = mddev->resync_min;
        else if (!mddev->bitmap)
            j = mddev->recovery_cp;

    } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        max_sectors = mddev->size << 1;
    else {
        /* recovery follows the physical size of devices */
        max_sectors = mddev->size << 1;
        j = MaxSector;
        rdev_for_each(rdev, rtmp, mddev)
            if (rdev->raid_disk >= 0 &&
                !test_bit(Faulty, &rdev->flags) &&
                !test_bit(In_sync, &rdev->flags) &&
                rdev->recovery_offset < j)
                j = rdev->recovery_offset;
    }

    printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
    printk(KERN_INFO "md: minimum _guaranteed_  speed:"
        " %d KB/sec/disk.\n", speed_min(mddev));
    printk(KERN_INFO "md: using maximum available idle IO bandwidth "
           "(but not more than %d KB/sec) for %s.\n",
           speed_max(mddev), desc);

    is_mddev_idle(mddev); /* this also initializes IO event counters */

    io_sectors = 0;
    for (m = 0; m < SYNC_MARKS; m++) {
        mark[m] = jiffies;
        mark_cnt[m] = io_sectors;
    }
    last_mark = 0;
    mddev->resync_mark = mark[last_mark];
    mddev->resync_mark_cnt = mark_cnt[last_mark];

    /*
     * Tune reconstruction:
     */
    window = 32*(PAGE_SIZE/512);
    printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
        window/2,(unsigned long long) max_sectors/2);

    atomic_set(&mddev->recovery_active, 0);
    last_check = 0;

    if (j>2) {
        printk(KERN_INFO
               "md: resuming %s of %s from checkpoint.\n",
               desc, mdname(mddev));
        mddev->curr_resync = j;
    }

    while (j < max_sectors) {
        sector_t sectors;

        skipped = 0;
        if (j >= mddev->resync_max) {
            sysfs_notify(&mddev->kobj, NULL, "sync_completed");
            wait_event(mddev->recovery_wait,
                       mddev->resync_max > j
                       || kthread_should_stop());
        }
        if (kthread_should_stop())
            goto interrupted;
        sectors = mddev->pers->sync_request(mddev, j, &skipped,
                          currspeed < speed_min(mddev));
        if (sectors == 0) {
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            goto out;
        }

        if (!skipped) { /* actual IO requested */
            io_sectors += sectors;
            atomic_add(sectors, &mddev->recovery_active);
        }

        j += sectors;
        if (j>1) mddev->curr_resync = j;
        mddev->curr_mark_cnt = io_sectors;
        if (last_check == 0)
            /* this is the earliest that rebuild will be
             * visible in /proc/mdstat
             */
            md_new_event(mddev);

        if (last_check + window > io_sectors || j == max_sectors)
            continue;

        last_check = io_sectors;

        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
            break;

    repeat:
        if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
            /* step marks */
            int next = (last_mark+1) % SYNC_MARKS;

            mddev->resync_mark = mark[next];
            mddev->resync_mark_cnt = mark_cnt[next];
            mark[next] = jiffies;
            mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
            last_mark = next;
        }


        if (kthread_should_stop())
            goto interrupted;


        /*
         * this loop exits only if either when we are slower than
         * the 'hard' speed limit, or the system was IO-idle for
         * a jiffy.
         * the system might be non-idle CPU-wise, but we only care
         * about not overloading the IO subsystem. (things like an
         * e2fsck being done on the RAID array should execute fast)
         */
        blk_unplug(mddev->queue);
        cond_resched();

        currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
            /((jiffies-mddev->resync_mark)/HZ +1) +1;

        if (currspeed > speed_min(mddev)) {
            if ((currspeed > speed_max(mddev)) ||
                    !is_mddev_idle(mddev)) {
                msleep(500);
                goto repeat;
            }
        }
    }
    printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
    /*
     * this also signals 'finished resyncing' to md_stop
     */
 out:
    blk_unplug(mddev->queue);

    wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

    /* tell personality that we are finished */
    mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

    if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
        mddev->curr_resync > 2) {
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
            if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                if (mddev->curr_resync >= mddev->recovery_cp) {
                    printk(KERN_INFO
                           "md: checkpointing %s of %s.\n",
                           desc, mdname(mddev));
                    mddev->recovery_cp = mddev->curr_resync;
                }
            } else
                mddev->recovery_cp = MaxSector;
        } else {
            if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                mddev->curr_resync = MaxSector;
            rdev_for_each(rdev, rtmp, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags) &&
                    !test_bit(In_sync, &rdev->flags) &&
                    rdev->recovery_offset < mddev->curr_resync)
                    rdev->recovery_offset = mddev->curr_resync;
        }
    }
    set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
    mddev->curr_resync = 0;
    mddev->resync_min = 0;
    mddev->resync_max = MaxSector;
    sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    wake_up(&resync_wait);
    set_bit(MD_RECOVERY_DONE, &mddev->recovery);
    md_wakeup_thread(mddev->thread);
    return;

 interrupted:
    /*
     * got a signal, exit.
     */
    printk(KERN_INFO
           "md: md_do_sync() got signal ... exiting\n");
    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
    goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
{
    mdk_rdev_t *rdev;
    struct list_head *rtmp;
    int spares = 0;

    rdev_for_each(rdev, rtmp, mddev)
        if (rdev->raid_disk >= 0 &&
            !test_bit(Blocked, &rdev->flags) &&
            (test_bit(Faulty, &rdev->flags) ||
             ! test_bit(In_sync, &rdev->flags)) &&
            atomic_read(&rdev->nr_pending)==0) {
            if (mddev->pers->hot_remove_disk(
                    mddev, rdev->raid_disk)==0) {
                char nm[20];
                sprintf(nm,"rd%d", rdev->raid_disk);
                sysfs_remove_link(&mddev->kobj, nm);
                rdev->raid_disk = -1;
            }
        }

    if (mddev->degraded && ! mddev->ro) {
        rdev_for_each(rdev, rtmp, mddev) {
            if (rdev->raid_disk >= 0 &&
                !test_bit(In_sync, &rdev->flags) &&
                !test_bit(Blocked, &rdev->flags))
                spares++;
            if (rdev->raid_disk < 0
                && !test_bit(Faulty, &rdev->flags)) {
                rdev->recovery_offset = 0;
                if (mddev->pers->
                    hot_add_disk(mddev, rdev) == 0) {
                    char nm[20];
                    sprintf(nm, "rd%d", rdev->raid_disk);
                    if (sysfs_create_link(&mddev->kobj,
                                          &rdev->kobj, nm))
                        printk(KERN_WARNING
                               "md: cannot register "
                               "%s for %s\n",
                               nm, mdname(mddev));
                    spares++;
                    md_new_event(mddev);
                } else
                    break;
            }
        }
    }
    return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
    mdk_rdev_t *rdev;
    struct list_head *rtmp;


    if (mddev->bitmap)
        bitmap_daemon_work(mddev->bitmap);

    if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags))
        sysfs_notify(&mddev->kobj, NULL, "array_state");

    if (signal_pending(current)) {
        if (mddev->pers->sync_request && !mddev->external) {
            printk(KERN_INFO "md: %s in immediate safe mode\n",
                   mdname(mddev));
            mddev->safemode = 2;
        }
        flush_signals(current);
    }

    if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
        return;
    if ( ! (
        (mddev->flags && !mddev->external) ||
        test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
        test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
        (mddev->external == 0 && mddev->safemode == 1) ||
        (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
         && !mddev->in_sync && mddev->recovery_cp == MaxSector)
        ))
        return;

    if (mddev_trylock(mddev)) {
        int spares = 0;

        if (mddev->ro) {
            /* Only thing we do on a ro array is remove
             * failed devices.
             */
            remove_and_add_spares(mddev);
            clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            goto unlock;
        }

        if (!mddev->external) {
            int did_change = 0;
            spin_lock_irq(&mddev->write_lock);
            if (mddev->safemode &&
                !atomic_read(&mddev->writes_pending) &&
                !mddev->in_sync &&
                mddev->recovery_cp == MaxSector) {
                mddev->in_sync = 1;
                did_change = 1;
                if (mddev->persistent)
                    set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            }
            if (mddev->safemode == 1)
                mddev->safemode = 0;
            spin_unlock_irq(&mddev->write_lock);
            if (did_change)
                sysfs_notify(&mddev->kobj, NULL, "array_state");
        }

        if (mddev->flags)
            md_update_sb(mddev, 0);

        rdev_for_each(rdev, rtmp, mddev)
            if (test_and_clear_bit(StateChanged, &rdev->flags))
                sysfs_notify(&rdev->kobj, NULL, "state");


        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
            /* resync/recovery still happening */
            clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            goto unlock;
        }
        if (mddev->sync_thread) {
            /* resync has finished, collect result */
            md_unregister_thread(mddev->sync_thread);
            mddev->sync_thread = NULL;
            if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
                !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                /* success...*/
                /* activate any spares */
                if (mddev->pers->spare_active(mddev))
                    sysfs_notify(&mddev->kobj, NULL,
                                 "degraded");
            }
            md_update_sb(mddev, 1);

            /* if array is no-longer degraded, then any saved_raid_disk
             * information must be scrapped
             */
            if (!mddev->degraded)
                rdev_for_each(rdev, rtmp, mddev)
                    rdev->saved_raid_disk = -1;

            mddev->recovery = 0;
            /* flag recovery needed just to double check */
            set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            sysfs_notify(&mddev->kobj, NULL, "sync_action");
            md_new_event(mddev);
            goto unlock;
        }
        /* Set RUNNING before clearing NEEDED to avoid
         * any transients in the value of "sync_action".
         */
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        /* Clear some bits that don't mean anything, but
         * might be left set
         */
        clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

        if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
            goto unlock;
        /* no recovery is running.
         * remove any failed drives, then
         * add spares if possible.
         * Spares are also removed and re-added, to allow
         * the personality to fail the re-add.
         */

        if (mddev->reshape_position != MaxSector) {
            if (mddev->pers->check_reshape(mddev) != 0)
                /* Cannot proceed */
                goto unlock;
            set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
            clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        } else if ((spares = remove_and_add_spares(mddev))) {
            clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
            clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
            clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
            set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        } else if (mddev->recovery_cp < MaxSector) {
            set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
            clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
            /* nothing to be done ... */
            goto unlock;

        if (mddev->pers->sync_request) {
            if (spares && mddev->bitmap && ! mddev->bitmap->file) {
                /* We are adding a device or devices to an array
                 * which has the bitmap stored on all devices.
                 * So make sure all bitmap pages get written
                 */
                bitmap_write_all(mddev->bitmap);
            }
            mddev->sync_thread = md_register_thread(md_do_sync,
                                                    mddev,
                                                    "%s_resync");
            if (!mddev->sync_thread) {
                printk(KERN_ERR "%s: could not start resync"
                    " thread...\n",
                    mdname(mddev));
                /* leave the spares where they are, it shouldn't hurt */
                mddev->recovery = 0;
            } else
                md_wakeup_thread(mddev->sync_thread);
            sysfs_notify(&mddev->kobj, NULL, "sync_action");
            md_new_event(mddev);
        }
    unlock:
        if (!mddev->sync_thread) {
            clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
            if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                   &mddev->recovery))
                sysfs_notify(&mddev->kobj, NULL, "sync_action");
        }
        mddev_unlock(mddev);
    }
}
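/*
 * Illustrative sketch (not part of the driver): every personality's
 * per-array thread ends its main loop with md_check_recovery(), which is
 * what drives steps 1-6 described above.  raid1d/raid5d follow this shape;
 * the example name is an assumption.
 */
#if 0
static void example_raid_daemon(mddev_t *mddev)
{
    /* ... personality-specific work: retry failed bios, etc. ... */

    md_check_recovery(mddev);  /* superblock updates, spare handling,
                                * starting/reaping the resync thread */
}
#endif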
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
    sysfs_notify(&rdev->kobj, NULL, "state");
    wait_event_timeout(rdev->blocked_wait,
                       !test_bit(Blocked, &rdev->flags),
                       msecs_to_jiffies(5000));
    rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
static int md_notify_reboot(struct notifier_block *this,
                unsigned long code, void *x)
{
    struct list_head *tmp;
    mddev_t *mddev;

    if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

        printk(KERN_INFO "md: stopping all md devices.\n");

        for_each_mddev(mddev, tmp)
            if (mddev_trylock(mddev)) {
                /* Force a switch to readonly even if the array
                 * appears to still be in use.  Hence
                 * the '100'.
                 */
                do_md_stop(mddev, 1, 100);
                mddev_unlock(mddev);
            }
        /*
         * certain more exotic SCSI devices are known to be
         * volatile wrt too early system reboots. While the
         * right place to handle this issue is the given
         * driver, we do want to have a safe RAID driver ...
         */
        mdelay(1000*1);
    }
    return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
    .notifier_call  = md_notify_reboot,
    .next           = NULL,
    .priority       = INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
    dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

    proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
    if (register_blkdev(MAJOR_NR, "md"))
        return -1;
    if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
        unregister_blkdev(MAJOR_NR, "md");
        return -1;
    }
    blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
                        md_probe, NULL, NULL);
    blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
                        md_probe, NULL, NULL);

    register_reboot_notifier(&md_notifier);
    raid_table_header = register_sysctl_table(raid_root_table);

    md_geninit();
    return 0;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
    struct list_head list;
    dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
    struct detected_devices_node *node_detected_dev;

    node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
    if (node_detected_dev) {
        node_detected_dev->dev = dev;
        list_add_tail(&node_detected_dev->list, &all_detected_devices);
    } else {
        printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
            ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
    }
}
static void autostart_arrays(int part)
{
    mdk_rdev_t *rdev;
    struct detected_devices_node *node_detected_dev;
    dev_t dev;
    int i_scanned, i_passed;

    i_scanned = 0;
    i_passed = 0;

    printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

    while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
        i_scanned++;
        node_detected_dev = list_entry(all_detected_devices.next,
                    struct detected_devices_node, list);
        list_del(&node_detected_dev->list);
        dev = node_detected_dev->dev;
        kfree(node_detected_dev);
        rdev = md_import_device(dev,0, 90);
        if (IS_ERR(rdev))
            continue;

        if (test_bit(Faulty, &rdev->flags)) {
            MD_BUG();
            continue;
        }
        set_bit(AutoDetected, &rdev->flags);
        list_add(&rdev->same_set, &pending_raid_disks);
        i_passed++;
    }

    printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
           i_scanned, i_passed);

    autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
    mddev_t *mddev;
    struct list_head *tmp;

    blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
    blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

    unregister_blkdev(MAJOR_NR,"md");
    unregister_blkdev(mdp_major, "mdp");
    unregister_reboot_notifier(&md_notifier);
    unregister_sysctl_table(raid_table_header);
    remove_proc_entry("mdstat", NULL);
    for_each_mddev(mddev, tmp) {
        struct gendisk *disk = mddev->gendisk;
        if (!disk)
            continue;
        export_array(mddev);
        del_gendisk(disk);
        put_disk(disk);
        mddev->gendisk = NULL;
    }
}

subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
    return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
    char *e;
    int num = simple_strtoul(val, &e, 10);
    if (*val && (*e == '\0' || *e == '\n')) {
        start_readonly = num;
        return 0;
    }
    return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);