Merge with Linux 2.5.48.
linux-2.6/linux-mips.git: drivers/md/md.c
1 /*
2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
7 Changes:
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
11 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
12 - kmod support by: Cyrus Durgin
13 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
14 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16 - lots of fixes and improvements to the RAID1/RAID5 and generic
17 RAID code (such as request based resynchronization):
19 Neil Brown <neilb@cse.unsw.edu.au>.
21 This program is free software; you can redistribute it and/or modify
22 it under the terms of the GNU General Public License as published by
23 the Free Software Foundation; either version 2, or (at your option)
24 any later version.
26 You should have received a copy of the GNU General Public License
27 (for example /usr/src/linux/COPYING); if not, write to the Free
28 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 #include <linux/module.h>
32 #include <linux/config.h>
33 #include <linux/linkage.h>
34 #include <linux/raid/md.h>
35 #include <linux/sysctl.h>
36 #include <linux/bio.h>
37 #include <linux/devfs_fs_kernel.h>
38 #include <linux/buffer_head.h> /* for invalidate_bdev */
40 #include <linux/init.h>
42 #ifdef CONFIG_KMOD
43 #include <linux/kmod.h>
44 #endif
46 #define __KERNEL_SYSCALLS__
47 #include <linux/unistd.h>
49 #include <asm/unaligned.h>
51 #define MAJOR_NR MD_MAJOR
52 #define MD_DRIVER
53 #define DEVICE_NR(device) (minor(device))
55 #include <linux/blk.h>
57 #define DEBUG 0
58 #define dprintk(x...) ((void)(DEBUG && printk(x)))
61 #ifndef MODULE
62 static void autostart_arrays (void);
63 #endif
65 static mdk_personality_t *pers[MAX_PERSONALITY];
68 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
69 * is 1000 KB/sec, so the extra system load does not show up that much.
70 * Increase it if you want to have more _guaranteed_ speed. Note that
71 * the RAID driver will use the maximum available bandwidth if the IO
72 * subsystem is idle. There is also an 'absolute maximum' reconstruction
73 * speed limit - in case reconstruction slows down your system despite
74 * idle IO detection.
76 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
79 static int sysctl_speed_limit_min = 1000;
80 static int sysctl_speed_limit_max = 200000;
82 static struct ctl_table_header *raid_table_header;
84 static ctl_table raid_table[] = {
85 {DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min",
86 &sysctl_speed_limit_min, sizeof(int), 0644, NULL, &proc_dointvec},
87 {DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max",
88 &sysctl_speed_limit_max, sizeof(int), 0644, NULL, &proc_dointvec},
89 {0}
92 static ctl_table raid_dir_table[] = {
93 {DEV_RAID, "raid", NULL, 0, 0555, raid_table},
94 {0}
97 static ctl_table raid_root_table[] = {
98 {CTL_DEV, "dev", NULL, 0, 0555, raid_dir_table},
99 {0}
102 static void md_recover_arrays(void);
103 static mdk_thread_t *md_recovery_thread;
105 sector_t md_size[MAX_MD_DEVS];
107 static struct block_device_operations md_fops;
108 static devfs_handle_t devfs_handle;
110 static struct gendisk *disks[MAX_MD_DEVS];
113 * Enables iteration over all existing md arrays
114 * all_mddevs_lock protects this list as well as mddev_map.
116 static LIST_HEAD(all_mddevs);
117 static spinlock_t all_mddevs_lock = SPIN_LOCK_UNLOCKED;
121 * iterates through all used mddevs in the system.
122 * We take care to grab the all_mddevs_lock whenever navigating
123 * the list, and to always hold a refcount when unlocked.
124 * Any code which breaks out of this loop while owning
125 * a reference to the current mddev must mddev_put it.
127 #define ITERATE_MDDEV(mddev,tmp) \
129 for (({ spin_lock(&all_mddevs_lock); \
130 tmp = all_mddevs.next; \
131 mddev = NULL;}); \
132 ({ if (tmp != &all_mddevs) \
133 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
134 spin_unlock(&all_mddevs_lock); \
135 if (mddev) mddev_put(mddev); \
136 mddev = list_entry(tmp, mddev_t, all_mddevs); \
137 tmp != &all_mddevs;}); \
138 ({ spin_lock(&all_mddevs_lock); \
139 tmp = tmp->next;}) \
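/*
 * Typical use (compare md_print_devices() below):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp) {
 *		... a reference to mddev is held while inside the body ...
 *	}
 */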
142 static mddev_t *mddev_map[MAX_MD_DEVS];
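/*
 * Initial make_request function for a newly allocated array queue:
 * fail every bio until do_md_run() installs the personality's
 * make_request method.
 */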
144 static int md_fail_request (request_queue_t *q, struct bio *bio)
146 bio_io_error(bio, bio->bi_size);
147 return 0;
150 static inline mddev_t *mddev_get(mddev_t *mddev)
152 atomic_inc(&mddev->active);
153 return mddev;
156 static void mddev_put(mddev_t *mddev)
158 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
159 return;
160 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
161 list_del(&mddev->all_mddevs);
162 mddev_map[mdidx(mddev)] = NULL;
163 kfree(mddev);
164 MOD_DEC_USE_COUNT;
166 spin_unlock(&all_mddevs_lock);
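/*
 * Find the mddev for a given unit number, or allocate a new one.
 * The allocation happens with all_mddevs_lock dropped, so we simply
 * retry the lookup afterwards in case another thread raced with us.
 */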
169 static mddev_t * mddev_find(int unit)
171 mddev_t *mddev, *new = NULL;
173 retry:
174 spin_lock(&all_mddevs_lock);
175 if (mddev_map[unit]) {
176 mddev = mddev_get(mddev_map[unit]);
177 spin_unlock(&all_mddevs_lock);
178 if (new)
179 kfree(new);
180 return mddev;
182 if (new) {
183 mddev_map[unit] = new;
184 list_add(&new->all_mddevs, &all_mddevs);
185 spin_unlock(&all_mddevs_lock);
186 MOD_INC_USE_COUNT;
187 return new;
189 spin_unlock(&all_mddevs_lock);
191 new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
192 if (!new)
193 return NULL;
195 memset(new, 0, sizeof(*new));
197 new->__minor = unit;
198 init_MUTEX(&new->reconfig_sem);
199 INIT_LIST_HEAD(&new->disks);
200 INIT_LIST_HEAD(&new->all_mddevs);
201 atomic_set(&new->active, 1);
202 blk_queue_make_request(&new->queue, md_fail_request);
204 goto retry;
207 static inline int mddev_lock(mddev_t * mddev)
209 return down_interruptible(&mddev->reconfig_sem);
212 static inline int mddev_trylock(mddev_t * mddev)
214 return down_trylock(&mddev->reconfig_sem);
217 static inline void mddev_unlock(mddev_t * mddev)
219 up(&mddev->reconfig_sem);
222 mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
224 mdk_rdev_t * rdev;
225 struct list_head *tmp;
227 ITERATE_RDEV(mddev,rdev,tmp) {
228 if (rdev->desc_nr == nr)
229 return rdev;
231 return NULL;
234 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
236 struct list_head *tmp;
237 mdk_rdev_t *rdev;
239 ITERATE_RDEV(mddev,rdev,tmp) {
240 if (rdev->bdev->bd_dev == dev)
241 return rdev;
243 return NULL;
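/*
 * Offset, in 1K blocks, of the 0.90 superblock: it sits in a reserved
 * area at the end of the device (see read_disk_sb() below).
 */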
246 static sector_t calc_dev_sboffset(struct block_device *bdev)
248 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
249 return MD_NEW_SIZE_BLOCKS(size);
252 static sector_t calc_dev_size(struct block_device *bdev, mddev_t *mddev)
254 sector_t size;
256 if (mddev->persistent)
257 size = calc_dev_sboffset(bdev);
258 else
259 size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
260 if (mddev->chunk_size)
261 size &= ~((sector_t)mddev->chunk_size/1024 - 1);
262 return size;
265 static sector_t zoned_raid_size(mddev_t *mddev)
267 sector_t mask;
268 mdk_rdev_t * rdev;
269 struct list_head *tmp;
272 * do size and offset calculations.
274 mask = ~((sector_t)mddev->chunk_size/1024 - 1);
276 ITERATE_RDEV(mddev,rdev,tmp) {
277 rdev->size &= mask;
278 md_size[mdidx(mddev)] += rdev->size;
280 return 0;
284 #define BAD_MAGIC KERN_ERR \
285 "md: invalid raid superblock magic on %s\n"
287 #define BAD_MINOR KERN_ERR \
288 "md: %s: invalid raid minor (%x)\n"
290 #define OUT_OF_MEM KERN_ALERT \
291 "md: out of memory.\n"
293 #define NO_SB KERN_ERR \
294 "md: disabled device %s, could not read superblock.\n"
296 #define BAD_CSUM KERN_WARNING \
297 "md: invalid superblock checksum on %s\n"
299 static int alloc_disk_sb(mdk_rdev_t * rdev)
301 if (rdev->sb_page)
302 MD_BUG();
304 rdev->sb_page = alloc_page(GFP_KERNEL);
305 if (!rdev->sb_page) {
306 printk(OUT_OF_MEM);
307 return -EINVAL;
310 return 0;
313 static void free_disk_sb(mdk_rdev_t * rdev)
315 if (rdev->sb_page) {
316 page_cache_release(rdev->sb_page);
317 rdev->sb_loaded = 0;
318 rdev->sb_page = NULL;
319 rdev->sb_offset = 0;
320 rdev->size = 0;
325 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
327 if (bio->bi_size)
328 return 1;
330 complete((struct completion*)bio->bi_private);
331 return 0;
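/*
 * Synchronous I/O on a single page: build a bio on the stack, submit
 * it, run the queues and wait for bi_complete().  Returns 1 if the
 * transfer finished up-to-date, 0 on error.
 */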
334 static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
335 struct page *page, int rw)
337 struct bio bio;
338 struct bio_vec vec;
339 struct completion event;
341 bio_init(&bio);
342 bio.bi_io_vec = &vec;
343 vec.bv_page = page;
344 vec.bv_len = size;
345 vec.bv_offset = 0;
346 bio.bi_vcnt = 1;
347 bio.bi_idx = 0;
348 bio.bi_size = size;
349 bio.bi_bdev = bdev;
350 bio.bi_sector = sector;
351 init_completion(&event);
352 bio.bi_private = &event;
353 bio.bi_end_io = bi_complete;
354 submit_bio(rw, &bio);
355 blk_run_queues();
356 wait_for_completion(&event);
358 return test_bit(BIO_UPTODATE, &bio.bi_flags);
361 static int read_disk_sb(mdk_rdev_t * rdev)
363 sector_t sb_offset;
365 if (!rdev->sb_page) {
366 MD_BUG();
367 return -EINVAL;
369 if (rdev->sb_loaded)
370 return 0;
373 * Calculate the position of the superblock,
374 * it's at the end of the disk.
376 * It also happens to be a multiple of 4Kb.
378 sb_offset = calc_dev_sboffset(rdev->bdev);
379 rdev->sb_offset = sb_offset;
381 if (!sync_page_io(rdev->bdev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ))
382 goto fail;
383 rdev->sb_loaded = 1;
384 return 0;
386 fail:
387 printk(NO_SB,bdev_partition_name(rdev->bdev));
388 return -EINVAL;
391 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
393 if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
394 (sb1->set_uuid1 == sb2->set_uuid1) &&
395 (sb1->set_uuid2 == sb2->set_uuid2) &&
396 (sb1->set_uuid3 == sb2->set_uuid3))
398 return 1;
400 return 0;
404 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
406 int ret;
407 mdp_super_t *tmp1, *tmp2;
409 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
410 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
412 if (!tmp1 || !tmp2) {
413 ret = 0;
414 printk(KERN_INFO "md.c: out of memory in sb_equal!\n");
415 goto abort;
418 *tmp1 = *sb1;
419 *tmp2 = *sb2;
422 * nr_disks is not constant
424 tmp1->nr_disks = 0;
425 tmp2->nr_disks = 0;
427 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
428 ret = 0;
429 else
430 ret = 1;
432 abort:
433 if (tmp1)
434 kfree(tmp1);
435 if (tmp2)
436 kfree(tmp2);
438 return ret;
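/*
 * Checksum the superblock with sb_csum temporarily zeroed, so the
 * stored checksum does not feed into the calculation.
 */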
441 static unsigned int calc_sb_csum(mdp_super_t * sb)
443 unsigned int disk_csum, csum;
445 disk_csum = sb->sb_csum;
446 sb->sb_csum = 0;
447 csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
448 sb->sb_csum = disk_csum;
449 return csum;
453 * Handle superblock details.
454 * We want to be able to handle multiple superblock formats
455 * so we have a common interface to them all, and an array of
456 * different handlers.
457 * We rely on user-space to write the initial superblock, and support
458 * reading and updating of superblocks.
459 * Interface methods are:
460 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev)
461 * loads and validates a superblock on dev.
462 * if refdev != NULL, compare superblocks on both devices
463 * Return:
464 * 0 - dev has a superblock that is compatible with refdev
465 * 1 - dev has a superblock that is compatible and newer than refdev
466 * so dev should be used as the refdev in future
467 * -EINVAL superblock incompatible or invalid
468 * -othererror e.g. -EIO
470 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
471 * Verify that dev is acceptable into mddev.
472 * The first time, mddev->raid_disks will be 0, and data from
473 * dev should be merged in. Subsequent calls check that dev
474 * is new enough. Return 0 or -EINVAL
476 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
477 * Update the superblock for rdev with data in mddev
478 * This does not write to disc.
482 struct super_type {
483 char *name;
484 struct module *owner;
485 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev);
486 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
487 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
491 * load_super for 0.90.0
493 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
495 mdp_super_t *sb;
496 int ret;
498 ret = read_disk_sb(rdev);
499 if (ret) return ret;
501 ret = -EINVAL;
503 sb = (mdp_super_t*)page_address(rdev->sb_page);
505 if (sb->md_magic != MD_SB_MAGIC) {
506 printk(BAD_MAGIC, bdev_partition_name(rdev->bdev));
507 goto abort;
510 if (sb->major_version != 0 ||
511 sb->minor_version != 90) {
512 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
513 sb->major_version, sb->minor_version,
514 bdev_partition_name(rdev->bdev));
515 goto abort;
518 if (sb->md_minor >= MAX_MD_DEVS) {
519 printk(BAD_MINOR, bdev_partition_name(rdev->bdev), sb->md_minor);
520 goto abort;
522 if (sb->raid_disks <= 0)
523 goto abort;
525 if (calc_sb_csum(sb) != sb->sb_csum) {
526 printk(BAD_CSUM, bdev_partition_name(rdev->bdev));
527 goto abort;
530 rdev->preferred_minor = sb->md_minor;
532 if (refdev == 0)
533 ret = 1;
534 else {
535 __u64 ev1, ev2;
536 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
537 if (!uuid_equal(refsb, sb)) {
538 printk(KERN_WARNING "md: %s has different UUID to %s\n",
539 bdev_partition_name(rdev->bdev),
540 bdev_partition_name(refdev->bdev));
541 goto abort;
543 if (!sb_equal(refsb, sb)) {
544 printk(KERN_WARNING "md: %s has same UUID but different superblock to %s\n",
545 bdev_partition_name(rdev->bdev),
546 bdev_partition_name(refdev->bdev));
547 goto abort;
549 ev1 = md_event(sb);
550 ev2 = md_event(refsb);
551 if (ev1 > ev2)
552 ret = 1;
553 else
554 ret = 0;
558 abort:
559 return ret;
563 * validate_super for 0.90.0
565 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
567 mdp_disk_t *desc;
568 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
570 if (mddev->raid_disks == 0) {
571 mddev->major_version = sb->major_version;
572 mddev->minor_version = sb->minor_version;
573 mddev->patch_version = sb->patch_version;
574 mddev->persistent = ! sb->not_persistent;
575 mddev->chunk_size = sb->chunk_size;
576 mddev->ctime = sb->ctime;
577 mddev->utime = sb->utime;
578 mddev->level = sb->level;
579 mddev->layout = sb->layout;
580 mddev->raid_disks = sb->raid_disks;
581 mddev->state = sb->state;
582 mddev->size = sb->size;
583 mddev->events = md_event(sb);
585 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
586 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
587 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
588 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
590 mddev->max_disks = MD_SB_DISKS;
591 } else {
592 __u64 ev1;
593 ev1 = md_event(sb);
594 ++ev1;
595 if (ev1 < mddev->events)
596 return -EINVAL;
598 if (mddev->level != LEVEL_MULTIPATH) {
599 rdev->desc_nr = sb->this_disk.number;
600 rdev->raid_disk = -1;
601 rdev->in_sync = rdev->faulty = 0;
602 desc = sb->disks + rdev->desc_nr;
604 if (desc->state & (1<<MD_DISK_FAULTY))
605 rdev->faulty = 1;
606 else if (desc->state & (1<<MD_DISK_SYNC) &&
607 desc->raid_disk < mddev->raid_disks) {
608 rdev->in_sync = 1;
609 rdev->raid_disk = desc->raid_disk;
612 return 0;
616 * sync_super for 0.90.0
618 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
620 mdp_super_t *sb;
621 struct list_head *tmp;
622 mdk_rdev_t *rdev2;
623 int next_spare = mddev->raid_disks;
625 /* make rdev->sb match mddev data..
627 * 1/ zero out disks
628 * 2/ Add info for each disk, keeping track of highest desc_nr
629 * 3/ any empty disks < highest become removed
631 * disks[0] gets initialised to REMOVED because
632 * we cannot be sure from other fields if it has
633 * been initialised or not.
635 int highest = 0;
636 int i;
637 int active=0, working=0,failed=0,spare=0,nr_disks=0;
639 sb = (mdp_super_t*)page_address(rdev->sb_page);
641 memset(sb, 0, sizeof(*sb));
643 sb->md_magic = MD_SB_MAGIC;
644 sb->major_version = mddev->major_version;
645 sb->minor_version = mddev->minor_version;
646 sb->patch_version = mddev->patch_version;
647 sb->gvalid_words = 0; /* ignored */
648 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
649 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
650 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
651 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
653 sb->ctime = mddev->ctime;
654 sb->level = mddev->level;
655 sb->size = mddev->size;
656 sb->raid_disks = mddev->raid_disks;
657 sb->md_minor = mddev->__minor;
658 sb->not_persistent = !mddev->persistent;
659 sb->utime = mddev->utime;
660 sb->state = mddev->state;
661 sb->events_hi = (mddev->events>>32);
662 sb->events_lo = (u32)mddev->events;
664 sb->layout = mddev->layout;
665 sb->chunk_size = mddev->chunk_size;
667 sb->disks[0].state = (1<<MD_DISK_REMOVED);
668 ITERATE_RDEV(mddev,rdev2,tmp) {
669 mdp_disk_t *d;
670 if (rdev2->raid_disk >= 0)
671 rdev2->desc_nr = rdev2->raid_disk;
672 else
673 rdev2->desc_nr = next_spare++;
674 d = &sb->disks[rdev2->desc_nr];
675 nr_disks++;
676 d->number = rdev2->desc_nr;
677 d->major = MAJOR(rdev2->bdev->bd_dev);
678 d->minor = MINOR(rdev2->bdev->bd_dev);
679 if (rdev2->raid_disk >= 0)
680 d->raid_disk = rdev2->raid_disk;
681 else
682 d->raid_disk = rdev2->desc_nr; /* compatibility */
683 if (rdev2->faulty) {
684 d->state = (1<<MD_DISK_FAULTY);
685 failed++;
686 } else if (rdev2->in_sync) {
687 d->state = (1<<MD_DISK_ACTIVE);
688 d->state |= (1<<MD_DISK_SYNC);
689 active++;
690 working++;
691 } else {
692 d->state = 0;
693 spare++;
694 working++;
696 if (rdev2->desc_nr > highest)
697 highest = rdev2->desc_nr;
700 /* now set the "removed" bit on any non-trailing holes */
701 for (i=0; i<highest; i++) {
702 mdp_disk_t *d = &sb->disks[i];
703 if (d->state == 0 && d->number == 0) {
704 d->number = i;
705 d->raid_disk = i;
706 d->state = (1<<MD_DISK_REMOVED);
709 sb->nr_disks = nr_disks;
710 sb->active_disks = active;
711 sb->working_disks = working;
712 sb->failed_disks = failed;
713 sb->spare_disks = spare;
715 sb->this_disk = sb->disks[rdev->desc_nr];
716 sb->sb_csum = calc_sb_csum(sb);
719 struct super_type super_types[] = {
720 [0] = {
721 .name = "0.90.0",
722 .owner = THIS_MODULE,
723 .load_super = super_90_load,
724 .validate_super = super_90_validate,
725 .sync_super = super_90_sync,
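/*
 * Does 'dev' share its underlying physical disk with any device already
 * in 'mddev'?  Used to warn that redundancy may be illusory.
 */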
731 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
733 struct list_head *tmp;
734 mdk_rdev_t *rdev;
736 ITERATE_RDEV(mddev,rdev,tmp)
737 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
738 return rdev;
740 return NULL;
743 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
745 struct list_head *tmp;
746 mdk_rdev_t *rdev;
748 ITERATE_RDEV(mddev1,rdev,tmp)
749 if (match_dev_unit(mddev2, rdev))
750 return 1;
752 return 0;
755 static LIST_HEAD(pending_raid_disks);
757 static void bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
759 mdk_rdev_t *same_pdev;
761 if (rdev->mddev) {
762 MD_BUG();
763 return;
765 same_pdev = match_dev_unit(mddev, rdev);
766 if (same_pdev)
767 printk( KERN_WARNING
768 "md%d: WARNING: %s appears to be on the same physical disk as %s. True\n"
769 " protection against single-disk failure might be compromised.\n",
770 mdidx(mddev), bdev_partition_name(rdev->bdev),
771 bdev_partition_name(same_pdev->bdev));
773 list_add(&rdev->same_set, &mddev->disks);
774 rdev->mddev = mddev;
775 printk(KERN_INFO "md: bind<%s>\n", bdev_partition_name(rdev->bdev));
778 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
780 if (!rdev->mddev) {
781 MD_BUG();
782 return;
784 list_del_init(&rdev->same_set);
785 printk(KERN_INFO "md: unbind<%s>\n", bdev_partition_name(rdev->bdev));
786 rdev->mddev = NULL;
790 * prevent the device from being mounted, repartitioned or
791 * otherwise reused by a RAID array (or any other kernel
792 * subsystem), by opening the device. [simply getting an
793 * inode is not enough, the SCSI module usage code needs
794 * an explicit open() on the device]
796 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
798 int err = 0;
799 struct block_device *bdev;
801 bdev = bdget(dev);
802 if (!bdev)
803 return -ENOMEM;
804 err = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_RAW);
805 if (err)
806 return err;
807 err = bd_claim(bdev, rdev);
808 if (err) {
809 blkdev_put(bdev, BDEV_RAW);
810 return err;
812 rdev->bdev = bdev;
813 return err;
816 static void unlock_rdev(mdk_rdev_t *rdev)
818 struct block_device *bdev = rdev->bdev;
819 rdev->bdev = NULL;
820 if (!bdev)
821 MD_BUG();
822 bd_release(bdev);
823 blkdev_put(bdev, BDEV_RAW);
826 void md_autodetect_dev(dev_t dev);
828 static void export_rdev(mdk_rdev_t * rdev)
830 printk(KERN_INFO "md: export_rdev(%s)\n",bdev_partition_name(rdev->bdev));
831 if (rdev->mddev)
832 MD_BUG();
833 free_disk_sb(rdev);
834 list_del_init(&rdev->same_set);
835 #ifndef MODULE
836 md_autodetect_dev(rdev->bdev->bd_dev);
837 #endif
838 unlock_rdev(rdev);
839 kfree(rdev);
842 static void kick_rdev_from_array(mdk_rdev_t * rdev)
844 unbind_rdev_from_array(rdev);
845 export_rdev(rdev);
848 static void export_array(mddev_t *mddev)
850 struct list_head *tmp;
851 mdk_rdev_t *rdev;
853 ITERATE_RDEV(mddev,rdev,tmp) {
854 if (!rdev->mddev) {
855 MD_BUG();
856 continue;
858 kick_rdev_from_array(rdev);
860 if (!list_empty(&mddev->disks))
861 MD_BUG();
862 mddev->raid_disks = 0;
865 #undef BAD_CSUM
866 #undef BAD_MAGIC
867 #undef OUT_OF_MEM
868 #undef NO_SB
870 static void print_desc(mdp_disk_t *desc)
872 printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number,
873 partition_name(MKDEV(desc->major,desc->minor)),
874 desc->major,desc->minor,desc->raid_disk,desc->state);
877 static void print_sb(mdp_super_t *sb)
879 int i;
881 printk(KERN_INFO "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
882 sb->major_version, sb->minor_version, sb->patch_version,
883 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
884 sb->ctime);
885 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", sb->level,
886 sb->size, sb->nr_disks, sb->raid_disks, sb->md_minor,
887 sb->layout, sb->chunk_size);
888 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d FD:%d SD:%d CSUM:%08x E:%08lx\n",
889 sb->utime, sb->state, sb->active_disks, sb->working_disks,
890 sb->failed_disks, sb->spare_disks,
891 sb->sb_csum, (unsigned long)sb->events_lo);
893 printk(KERN_INFO);
894 for (i = 0; i < MD_SB_DISKS; i++) {
895 mdp_disk_t *desc;
897 desc = sb->disks + i;
898 if (desc->number || desc->major || desc->minor ||
899 desc->raid_disk || (desc->state && (desc->state != 4))) {
900 printk(" D %2d: ", i);
901 print_desc(desc);
904 printk(KERN_INFO "md: THIS: ");
905 print_desc(&sb->this_disk);
909 static void print_rdev(mdk_rdev_t *rdev)
911 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%d ",
912 bdev_partition_name(rdev->bdev),
913 (unsigned long long)rdev->size, rdev->faulty, rdev->in_sync, rdev->desc_nr);
914 if (rdev->sb_loaded) {
915 printk(KERN_INFO "md: rdev superblock:\n");
916 print_sb((mdp_super_t*)page_address(rdev->sb_page));
917 } else
918 printk(KERN_INFO "md: no rdev superblock!\n");
921 void md_print_devices(void)
923 struct list_head *tmp, *tmp2;
924 mdk_rdev_t *rdev;
925 mddev_t *mddev;
927 printk("\n");
928 printk("md: **********************************\n");
929 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
930 printk("md: **********************************\n");
931 ITERATE_MDDEV(mddev,tmp) {
932 printk("md%d: ", mdidx(mddev));
934 ITERATE_RDEV(mddev,rdev,tmp2)
935 printk("<%s>", bdev_partition_name(rdev->bdev));
937 ITERATE_RDEV(mddev,rdev,tmp2)
938 print_rdev(rdev);
940 printk("md: **********************************\n");
941 printk("\n");
945 static int write_disk_sb(mdk_rdev_t * rdev)
947 sector_t sb_offset;
948 sector_t size;
950 if (!rdev->sb_loaded) {
951 MD_BUG();
952 return 1;
954 if (rdev->faulty) {
955 MD_BUG();
956 return 1;
959 sb_offset = calc_dev_sboffset(rdev->bdev);
960 if (rdev->sb_offset != sb_offset) {
961 printk(KERN_INFO "%s's sb offset has changed from %llu to %llu, skipping\n",
962 bdev_partition_name(rdev->bdev),
963 (unsigned long long)rdev->sb_offset,
964 (unsigned long long)sb_offset);
965 goto skip;
968 * If the disk went offline meanwhile and it's just a spare, then
969 * its size has changed to zero silently, and the MD code does
970 * not yet know that it's faulty.
972 size = calc_dev_size(rdev->bdev, rdev->mddev);
973 if (size != rdev->size) {
974 printk(KERN_INFO "%s's size has changed from %llu to %llu since import, skipping\n",
975 bdev_partition_name(rdev->bdev),
976 (unsigned long long)rdev->size,
977 (unsigned long long)size);
978 goto skip;
981 printk(KERN_INFO "(write) %s's sb offset: %llu\n", bdev_partition_name(rdev->bdev), (unsigned long long)sb_offset);
983 if (!sync_page_io(rdev->bdev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
984 goto fail;
985 skip:
986 return 0;
987 fail:
988 printk("md: write_disk_sb failed for device %s\n", bdev_partition_name(rdev->bdev));
989 return 1;
992 static void sync_sbs(mddev_t * mddev)
994 mdk_rdev_t *rdev;
995 struct list_head *tmp;
997 ITERATE_RDEV(mddev,rdev,tmp) {
998 super_90_sync(mddev, rdev);
999 rdev->sb_loaded = 1;
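/*
 * Write the superblocks of all devices in the array back to disk:
 * bump the event counter, re-sync the in-core copies and write each
 * non-faulty device, retrying a limited number of times on error.
 */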
1003 static void md_update_sb(mddev_t * mddev)
1005 int err, count = 100;
1006 struct list_head *tmp;
1007 mdk_rdev_t *rdev;
1009 mddev->sb_dirty = 0;
1010 repeat:
1011 mddev->utime = get_seconds();
1012 mddev->events ++;
1014 if (!mddev->events) {
1016 * oops, this 64-bit counter should never wrap.
1017 * Either we are in around ~1 trillion A.C., assuming
1018 * 1 reboot per second, or we have a bug:
1020 MD_BUG();
1021 mddev->events --;
1023 sync_sbs(mddev);
1026 * do not write anything to disk if using
1027 * nonpersistent superblocks
1029 if (!mddev->persistent)
1030 return;
1032 printk(KERN_INFO "md: updating md%d RAID superblock on device\n",
1033 mdidx(mddev));
1035 err = 0;
1036 ITERATE_RDEV(mddev,rdev,tmp) {
1037 printk(KERN_INFO "md: ");
1038 if (rdev->faulty)
1039 printk("(skipping faulty ");
1041 printk("%s ", bdev_partition_name(rdev->bdev));
1042 if (!rdev->faulty) {
1043 err += write_disk_sb(rdev);
1044 } else
1045 printk(")\n");
1046 if (!err && mddev->level == LEVEL_MULTIPATH)
1047 /* only need to write one superblock... */
1048 break;
1050 if (err) {
1051 if (--count) {
1052 printk(KERN_ERR "md: errors occurred during superblock update, repeating\n");
1053 goto repeat;
1055 printk(KERN_ERR "md: excessive errors occurred during superblock update, exiting\n");
1060 * Import a device. If 'on_disk', then sanity check the superblock
1062 * mark the device faulty if:
1064 * - the device is nonexistent (zero size)
1065 * - the device has no valid superblock
1067 * a faulty rdev _never_ has rdev->sb set.
1069 static mdk_rdev_t *md_import_device(dev_t newdev, int on_disk)
1071 int err;
1072 mdk_rdev_t *rdev;
1073 sector_t size;
1075 rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
1076 if (!rdev) {
1077 printk(KERN_ERR "md: could not alloc mem for %s!\n", partition_name(newdev));
1078 return ERR_PTR(-ENOMEM);
1080 memset(rdev, 0, sizeof(*rdev));
1082 if ((err = alloc_disk_sb(rdev)))
1083 goto abort_free;
1085 err = lock_rdev(rdev, newdev);
1086 if (err) {
1087 printk(KERN_ERR "md: could not lock %s.\n",
1088 partition_name(newdev));
1089 goto abort_free;
1091 rdev->desc_nr = -1;
1092 rdev->faulty = 0;
1093 rdev->in_sync = 0;
1094 atomic_set(&rdev->nr_pending, 0);
1096 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1097 if (!size) {
1098 printk(KERN_WARNING
1099 "md: %s has zero or unknown size, marking faulty!\n",
1100 bdev_partition_name(rdev->bdev));
1101 err = -EINVAL;
1102 goto abort_free;
1105 if (on_disk) {
1106 err = super_90_load(rdev, NULL);
1107 if (err == -EINVAL) {
1108 printk(KERN_WARNING "md: %s has invalid sb, not importing!\n",
1109 bdev_partition_name(rdev->bdev));
1110 goto abort_free;
1112 if (err < 0) {
1113 printk(KERN_WARNING "md: could not read %s's sb, not importing!\n",
1114 bdev_partition_name(rdev->bdev));
1115 goto abort_free;
1118 INIT_LIST_HEAD(&rdev->same_set);
1120 return rdev;
1122 abort_free:
1123 if (rdev->sb_page) {
1124 if (rdev->bdev)
1125 unlock_rdev(rdev);
1126 free_disk_sb(rdev);
1128 kfree(rdev);
1129 return ERR_PTR(err);
1133 * Check a full RAID array for plausibility
1136 #define INCONSISTENT KERN_ERR \
1137 "md: fatal superblock inconsistency in %s -- removing from array\n"
1139 #define OUT_OF_DATE KERN_ERR \
1140 "md: superblock update time inconsistency -- using the most recent one\n"
1142 #define OLD_VERSION KERN_ALERT \
1143 "md: md%d: unsupported raid array version %d.%d.%d\n"
1145 #define NOT_CLEAN_IGNORE KERN_ERR \
1146 "md: md%d: raid array is not clean -- starting background reconstruction\n"
1148 #define UNKNOWN_LEVEL KERN_ERR \
1149 "md: md%d: unsupported raid level %d\n"
1151 static int analyze_sbs(mddev_t * mddev)
1153 int i;
1154 struct list_head *tmp;
1155 mdk_rdev_t *rdev, *freshest;
1157 freshest = NULL;
1158 ITERATE_RDEV(mddev,rdev,tmp)
1159 switch (super_90_load(rdev, freshest)) {
1160 case 1:
1161 freshest = rdev;
1162 break;
1163 case 0:
1164 break;
1165 default:
1166 printk(INCONSISTENT, bdev_partition_name(rdev->bdev));
1167 kick_rdev_from_array(rdev);
1171 super_90_validate(mddev, freshest);
1173 i = 0;
1174 ITERATE_RDEV(mddev,rdev,tmp) {
1175 if (rdev != freshest)
1176 if (super_90_validate(mddev, rdev)) {
1177 printk(KERN_WARNING "md: kicking non-fresh %s from array!\n",
1178 bdev_partition_name(rdev->bdev));
1179 kick_rdev_from_array(rdev);
1180 continue;
1182 if (mddev->level == LEVEL_MULTIPATH) {
1183 rdev->desc_nr = i++;
1184 rdev->raid_disk = rdev->desc_nr;
1185 rdev->in_sync = 1;
1191 * Check if we can support this RAID array
1193 if (mddev->major_version != MD_MAJOR_VERSION ||
1194 mddev->minor_version > MD_MINOR_VERSION) {
1196 printk(OLD_VERSION, mdidx(mddev), mddev->major_version,
1197 mddev->minor_version, mddev->patch_version);
1198 goto abort;
1201 if ((mddev->state != (1 << MD_SB_CLEAN)) && ((mddev->level == 1) ||
1202 (mddev->level == 4) || (mddev->level == 5)))
1203 printk(NOT_CLEAN_IGNORE, mdidx(mddev));
1205 return 0;
1206 abort:
1207 return 1;
1210 #undef INCONSISTENT
1211 #undef OUT_OF_DATE
1212 #undef OLD_VERSION
1213 #undef OLD_LEVEL
1215 static int device_size_calculation(mddev_t * mddev)
1217 int data_disks = 0;
1218 unsigned int readahead;
1219 struct list_head *tmp;
1220 mdk_rdev_t *rdev;
1223 * Do device size calculation. Bail out if too small.
1224 * (we have to do this after having validated chunk_size,
1225 * because device size has to be modulo chunk_size)
1228 ITERATE_RDEV(mddev,rdev,tmp) {
1229 if (rdev->faulty)
1230 continue;
1231 if (rdev->size) {
1232 MD_BUG();
1233 continue;
1235 rdev->size = calc_dev_size(rdev->bdev, mddev);
1236 if (rdev->size < mddev->chunk_size / 1024) {
1237 printk(KERN_WARNING
1238 "md: Dev %s smaller than chunk_size: %lluk < %dk\n",
1239 bdev_partition_name(rdev->bdev),
1240 (unsigned long long)rdev->size, mddev->chunk_size / 1024);
1241 return -EINVAL;
1245 switch (mddev->level) {
1246 case LEVEL_MULTIPATH:
1247 data_disks = 1;
1248 break;
1249 case -3:
1250 data_disks = 1;
1251 break;
1252 case -2:
1253 data_disks = 1;
1254 break;
1255 case LEVEL_LINEAR:
1256 zoned_raid_size(mddev);
1257 data_disks = 1;
1258 break;
1259 case 0:
1260 zoned_raid_size(mddev);
1261 data_disks = mddev->raid_disks;
1262 break;
1263 case 1:
1264 data_disks = 1;
1265 break;
1266 case 4:
1267 case 5:
1268 data_disks = mddev->raid_disks-1;
1269 break;
1270 default:
1271 printk(UNKNOWN_LEVEL, mdidx(mddev), mddev->level);
1272 goto abort;
1274 if (!md_size[mdidx(mddev)])
1275 md_size[mdidx(mddev)] = mddev->size * data_disks;
1277 readahead = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
1278 if (!mddev->level || (mddev->level == 4) || (mddev->level == 5)) {
1279 readahead = (mddev->chunk_size>>PAGE_SHIFT) * 4 * data_disks;
1280 if (readahead < data_disks * (MAX_SECTORS>>(PAGE_SHIFT-9))*2)
1281 readahead = data_disks * (MAX_SECTORS>>(PAGE_SHIFT-9))*2;
1282 } else {
1283 // (no multipath branch - it uses the default setting)
1284 if (mddev->level == -3)
1285 readahead = 0;
1288 printk(KERN_INFO "md%d: max total readahead window set to %ldk\n",
1289 mdidx(mddev), readahead*(PAGE_SIZE/1024));
1291 printk(KERN_INFO
1292 "md%d: %d data-disks, max readahead per data-disk: %ldk\n",
1293 mdidx(mddev), data_disks, readahead/data_disks*(PAGE_SIZE/1024));
1294 return 0;
1295 abort:
1296 return 1;
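/*
 * Set up the gendisk for an md unit on first use: allocate it, fill in
 * name, fops and queue, and add_disk() it.  disks_sem serialises
 * concurrent probes of the same unit.
 */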
1299 static struct gendisk *md_probe(dev_t dev, int *part, void *data)
1301 static DECLARE_MUTEX(disks_sem);
1302 int unit = MINOR(dev);
1303 mddev_t *mddev = mddev_find(unit);
1304 struct gendisk *disk;
1306 if (!mddev)
1307 return NULL;
1309 down(&disks_sem);
1310 if (disks[unit]) {
1311 up(&disks_sem);
1312 mddev_put(mddev);
1313 return NULL;
1315 disk = alloc_disk(1);
1316 if (!disk) {
1317 up(&disks_sem);
1318 mddev_put(mddev);
1319 return NULL;
1321 disk->major = MD_MAJOR;
1322 disk->first_minor = mdidx(mddev);
1323 sprintf(disk->disk_name, "md%d", mdidx(mddev));
1324 disk->fops = &md_fops;
1325 disk->private_data = mddev;
1326 disk->queue = &mddev->queue;
1327 add_disk(disk);
1328 disks[mdidx(mddev)] = disk;
1329 up(&disks_sem);
1330 return NULL;
1333 #define TOO_BIG_CHUNKSIZE KERN_ERR \
1334 "too big chunk_size: %d > %d\n"
1336 #define TOO_SMALL_CHUNKSIZE KERN_ERR \
1337 "too small chunk_size: %d < %ld\n"
1339 #define BAD_CHUNKSIZE KERN_ERR \
1340 "no chunksize specified, see 'man raidtab'\n"
1342 static int do_md_run(mddev_t * mddev)
1344 int pnum, err;
1345 int chunk_size;
1346 struct list_head *tmp;
1347 mdk_rdev_t *rdev;
1348 struct gendisk *disk;
1350 if (list_empty(&mddev->disks)) {
1351 MD_BUG();
1352 return -EINVAL;
1355 if (mddev->pers)
1356 return -EBUSY;
1359 * Resize disks to align partitions size on a given
1360 * chunk size.
1362 md_size[mdidx(mddev)] = 0;
1365 * Analyze all RAID superblock(s)
1367 if (!mddev->raid_disks && analyze_sbs(mddev)) {
1368 MD_BUG();
1369 return -EINVAL;
1372 chunk_size = mddev->chunk_size;
1373 pnum = level_to_pers(mddev->level);
1375 if ((pnum != MULTIPATH) && (pnum != RAID1)) {
1376 if (!chunk_size) {
1378 * 'default chunksize' in the old md code used to
1379 * be PAGE_SIZE, baaad.
1380 * we abort here to be on the safe side. We don't
1381 * want to continue the bad practice.
1383 printk(BAD_CHUNKSIZE);
1384 return -EINVAL;
1386 if (chunk_size > MAX_CHUNK_SIZE) {
1387 printk(TOO_BIG_CHUNKSIZE, chunk_size, MAX_CHUNK_SIZE);
1388 return -EINVAL;
1391 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
1393 if ( (1 << ffz(~chunk_size)) != chunk_size) {
1394 MD_BUG();
1395 return -EINVAL;
1397 if (chunk_size < PAGE_SIZE) {
1398 printk(TOO_SMALL_CHUNKSIZE, chunk_size, PAGE_SIZE);
1399 return -EINVAL;
1403 if (pnum >= MAX_PERSONALITY) {
1404 MD_BUG();
1405 return -EINVAL;
1408 if (!pers[pnum])
1410 #ifdef CONFIG_KMOD
1411 char module_name[80];
1412 sprintf (module_name, "md-personality-%d", pnum);
1413 request_module (module_name);
1414 if (!pers[pnum])
1415 #endif
1417 printk(KERN_ERR "md: personality %d is not loaded!\n",
1418 pnum);
1419 return -EINVAL;
1423 if (device_size_calculation(mddev))
1424 return -EINVAL;
1427 * Drop all container device buffers, from now on
1428 * the only valid external interface is through the md
1429 * device.
1430 * Also find largest hardsector size
1432 ITERATE_RDEV(mddev,rdev,tmp) {
1433 if (rdev->faulty)
1434 continue;
1435 sync_blockdev(rdev->bdev);
1436 invalidate_bdev(rdev->bdev, 0);
1437 #if 0
1439 * Aside from the obvious breakage (code below results in block size set
1440 * according to the sector size of the last component instead of the
1441 * maximal sector size), we have a more interesting problem here.
1442 * Namely, we actually ought to set _sector_ size for the array
1443 * and that requires per-array request queues. Disabled for now.
1445 md_blocksizes[mdidx(mddev)] = 1024;
1446 if (bdev_hardsect_size(rdev->bdev) > md_blocksizes[mdidx(mddev)])
1447 md_blocksizes[mdidx(mddev)] = bdev_hardsect_size(rdev->bdev);
1448 #endif
1451 md_probe(mdidx(mddev), NULL, NULL);
1452 disk = disks[mdidx(mddev)];
1453 if (!disk)
1454 return -ENOMEM;
1455 mddev->pers = pers[pnum];
1457 blk_queue_make_request(&mddev->queue, mddev->pers->make_request);
1458 printk("%s: setting max_sectors to %d, segment boundary to %d\n",
1459 disk->disk_name,
1460 chunk_size >> 9,
1461 (chunk_size>>1)-1);
1462 blk_queue_max_sectors(&mddev->queue, chunk_size >> 9);
1463 blk_queue_segment_boundary(&mddev->queue, (chunk_size>>1) - 1);
1464 mddev->queue.queuedata = mddev;
1466 err = mddev->pers->run(mddev);
1467 if (err) {
1468 printk(KERN_ERR "md: pers->run() failed ...\n");
1469 mddev->pers = NULL;
1470 return -EINVAL;
1473 mddev->in_sync = (mddev->state & (1<<MD_SB_CLEAN));
1474 /* if personality doesn't have "sync_request", then
1475 * a dirty array doesn't mean anything
1477 if (mddev->pers->sync_request)
1478 mddev->state &= ~(1 << MD_SB_CLEAN);
1479 md_update_sb(mddev);
1480 md_recover_arrays();
1481 set_capacity(disk, md_size[mdidx(mddev)]<<1);
1482 return (0);
1485 #undef TOO_BIG_CHUNKSIZE
1486 #undef BAD_CHUNKSIZE
1488 static int restart_array(mddev_t *mddev)
1490 struct gendisk *disk = disks[mdidx(mddev)];
1491 int err;
1494 * Complain if it has no devices
1496 err = -ENXIO;
1497 if (list_empty(&mddev->disks))
1498 goto out;
1500 if (mddev->pers) {
1501 err = -EBUSY;
1502 if (!mddev->ro)
1503 goto out;
1505 mddev->ro = 0;
1506 set_disk_ro(disk, 0);
1508 printk(KERN_INFO
1509 "md: md%d switched to read-write mode.\n", mdidx(mddev));
1511 * Kick recovery or resync if necessary
1513 md_recover_arrays();
1514 err = 0;
1515 } else {
1516 printk(KERN_ERR "md: md%d has no personality assigned.\n",
1517 mdidx(mddev));
1518 err = -EINVAL;
1521 out:
1522 return err;
1525 #define STILL_MOUNTED KERN_WARNING \
1526 "md: md%d still mounted.\n"
1527 #define STILL_IN_USE \
1528 "md: md%d still in use.\n"
1530 static int do_md_stop(mddev_t * mddev, int ro)
1532 int err = 0;
1533 struct gendisk *disk = disks[mdidx(mddev)];
1535 if (atomic_read(&mddev->active)>2) {
1536 printk(STILL_IN_USE, mdidx(mddev));
1537 err = -EBUSY;
1538 goto out;
1541 if (mddev->pers) {
1542 if (mddev->sync_thread) {
1543 if (mddev->recovery_running > 0)
1544 mddev->recovery_running = -EINTR;
1545 md_unregister_thread(mddev->sync_thread);
1546 mddev->sync_thread = NULL;
1549 invalidate_device(mk_kdev(disk->major, disk->first_minor), 1);
1551 if (ro) {
1552 err = -ENXIO;
1553 if (mddev->ro)
1554 goto out;
1555 mddev->ro = 1;
1556 } else {
1557 if (mddev->ro)
1558 set_disk_ro(disk, 0);
1559 if (mddev->pers->stop(mddev)) {
1560 err = -EBUSY;
1561 if (mddev->ro)
1562 set_disk_ro(disk, 1);
1563 goto out;
1565 mddev->pers = NULL;
1566 if (mddev->ro)
1567 mddev->ro = 0;
1569 if (mddev->raid_disks) {
1571 * mark it clean only if there was no resync
1572 * interrupted.
1574 if (mddev->in_sync) {
1575 printk(KERN_INFO "md: marking sb clean...\n");
1576 mddev->state |= 1 << MD_SB_CLEAN;
1578 md_update_sb(mddev);
1580 if (ro)
1581 set_disk_ro(disk, 1);
1584 * Free resources if final stop
1586 if (!ro) {
1587 struct gendisk *disk;
1588 printk(KERN_INFO "md: md%d stopped.\n", mdidx(mddev));
1590 export_array(mddev);
1592 md_size[mdidx(mddev)] = 0;
1593 disk = disks[mdidx(mddev)];
1594 if (disk)
1595 set_capacity(disk, 0);
1596 } else
1597 printk(KERN_INFO "md: md%d switched to read-only mode.\n", mdidx(mddev));
1598 err = 0;
1599 out:
1600 return err;
1603 static void autorun_array(mddev_t *mddev)
1605 mdk_rdev_t *rdev;
1606 struct list_head *tmp;
1607 int err;
1609 if (list_empty(&mddev->disks)) {
1610 MD_BUG();
1611 return;
1614 printk(KERN_INFO "md: running: ");
1616 ITERATE_RDEV(mddev,rdev,tmp) {
1617 printk("<%s>", bdev_partition_name(rdev->bdev));
1619 printk("\n");
1621 err = do_md_run (mddev);
1622 if (err) {
1623 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
1625 * prevent the writeback of an unrunnable array
1627 mddev->sb_dirty = 0;
1628 do_md_stop (mddev, 0);
1633 * lets try to run arrays based on all disks that have arrived
1634 * until now. (those are in pending_raid_disks)
1636 * the method: pick the first pending disk, collect all disks with
1637 * the same UUID, remove all from the pending list and put them into
1638 * the 'same_array' list. Then order this list based on superblock
1639 * update time (freshest comes first), kick out 'old' disks and
1640 * compare superblocks. If everything's fine then run it.
1642 * If "unit" is allocated, then bump its reference count
1644 static void autorun_devices(void)
1646 struct list_head candidates;
1647 struct list_head *tmp;
1648 mdk_rdev_t *rdev0, *rdev;
1649 mddev_t *mddev;
1651 printk(KERN_INFO "md: autorun ...\n");
1652 while (!list_empty(&pending_raid_disks)) {
1653 rdev0 = list_entry(pending_raid_disks.next,
1654 mdk_rdev_t, same_set);
1656 printk(KERN_INFO "md: considering %s ...\n", bdev_partition_name(rdev0->bdev));
1657 INIT_LIST_HEAD(&candidates);
1658 ITERATE_RDEV_PENDING(rdev,tmp)
1659 if (super_90_load(rdev, rdev0) >= 0) {
1660 printk(KERN_INFO "md: adding %s ...\n", bdev_partition_name(rdev->bdev));
1661 list_move(&rdev->same_set, &candidates);
1664 * now we have a set of devices, with all of them having
1665 * mostly sane superblocks. It's time to allocate the
1666 * mddev.
1669 mddev = mddev_find(rdev0->preferred_minor);
1670 if (!mddev) {
1671 printk(KERN_ERR "md: cannot allocate memory for md drive.\n");
1672 break;
1674 if (mddev_lock(mddev))
1675 printk(KERN_WARNING "md: md%d locked, cannot run\n",
1676 mdidx(mddev));
1677 else if (mddev->raid_disks || !list_empty(&mddev->disks)) {
1678 printk(KERN_WARNING "md: md%d already running, cannot run %s\n",
1679 mdidx(mddev), bdev_partition_name(rdev0->bdev));
1680 mddev_unlock(mddev);
1681 } else {
1682 printk(KERN_INFO "md: created md%d\n", mdidx(mddev));
1683 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
1684 list_del_init(&rdev->same_set);
1685 bind_rdev_to_array(rdev, mddev);
1687 autorun_array(mddev);
1688 mddev_unlock(mddev);
1690 /* on success, candidates will be empty, on error
1691 * it won't...
1693 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
1694 export_rdev(rdev);
1695 mddev_put(mddev);
1697 printk(KERN_INFO "md: ... autorun DONE.\n");
1701 * import RAID devices based on one partition
1702 * if possible, the array gets run as well.
1705 #define BAD_VERSION KERN_ERR \
1706 "md: %s has RAID superblock version 0.%d, autodetect needs v0.90 or higher\n"
1708 #define OUT_OF_MEM KERN_ALERT \
1709 "md: out of memory.\n"
1711 #define NO_DEVICE KERN_ERR \
1712 "md: disabled device %s\n"
1714 #define AUTOADD_FAILED KERN_ERR \
1715 "md: auto-adding devices to md%d FAILED (error %d).\n"
1717 #define AUTOADD_FAILED_USED KERN_ERR \
1718 "md: cannot auto-add device %s to md%d, already used.\n"
1720 #define AUTORUN_FAILED KERN_ERR \
1721 "md: auto-running md%d FAILED (error %d).\n"
1723 #define MDDEV_BUSY KERN_ERR \
1724 "md: cannot auto-add to md%d, already running.\n"
1726 #define AUTOADDING KERN_INFO \
1727 "md: auto-adding devices to md%d, based on %s's superblock.\n"
1729 #define AUTORUNNING KERN_INFO \
1730 "md: auto-running md%d.\n"
1732 static int autostart_array(dev_t startdev)
1734 int err = -EINVAL, i;
1735 mdp_super_t *sb = NULL;
1736 mdk_rdev_t *start_rdev = NULL, *rdev;
1738 start_rdev = md_import_device(startdev, 1);
1739 if (IS_ERR(start_rdev)) {
1740 printk(KERN_WARNING "md: could not import %s!\n", partition_name(startdev));
1741 return err;
1744 /* NOTE: this can only work for 0.90.0 superblocks */
1745 sb = (mdp_super_t*)page_address(start_rdev->sb_page);
1746 if (sb->major_version != 0 ||
1747 sb->minor_version != 90 ) {
1748 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
1749 export_rdev(start_rdev);
1750 return err;
1753 if (start_rdev->faulty) {
1754 printk(KERN_WARNING "md: can not autostart based on faulty %s!\n",
1755 bdev_partition_name(start_rdev->bdev));
1756 export_rdev(start_rdev);
1757 return err;
1759 list_add(&start_rdev->same_set, &pending_raid_disks);
1761 for (i = 0; i < MD_SB_DISKS; i++) {
1762 mdp_disk_t *desc;
1763 dev_t dev;
1765 desc = sb->disks + i;
1766 dev = MKDEV(desc->major, desc->minor);
1768 if (!dev)
1769 continue;
1770 if (dev == startdev)
1771 continue;
1772 rdev = md_import_device(dev, 1);
1773 if (IS_ERR(rdev)) {
1774 printk(KERN_WARNING "md: could not import %s, trying to run array nevertheless.\n",
1775 partition_name(dev));
1776 continue;
1778 list_add(&rdev->same_set, &pending_raid_disks);
1782 * possibly return error codes from autorun_devices() in the future
1784 autorun_devices();
1785 return 0;
1789 #undef BAD_VERSION
1790 #undef OUT_OF_MEM
1791 #undef NO_DEVICE
1792 #undef AUTOADD_FAILED_USED
1793 #undef AUTOADD_FAILED
1794 #undef AUTORUN_FAILED
1795 #undef AUTOADDING
1796 #undef AUTORUNNING
1799 static int get_version(void * arg)
1801 mdu_version_t ver;
1803 ver.major = MD_MAJOR_VERSION;
1804 ver.minor = MD_MINOR_VERSION;
1805 ver.patchlevel = MD_PATCHLEVEL_VERSION;
1807 if (copy_to_user(arg, &ver, sizeof(ver)))
1808 return -EFAULT;
1810 return 0;
1813 static int get_array_info(mddev_t * mddev, void * arg)
1815 mdu_array_info_t info;
1816 int nr,working,active,failed,spare;
1817 mdk_rdev_t *rdev;
1818 struct list_head *tmp;
1820 nr=working=active=failed=spare=0;
1821 ITERATE_RDEV(mddev,rdev,tmp) {
1822 nr++;
1823 if (rdev->faulty)
1824 failed++;
1825 else {
1826 working++;
1827 if (rdev->in_sync)
1828 active++;
1829 else
1830 spare++;
1834 info.major_version = mddev->major_version;
1836 info.minor_version = mddev->minor_version;
1837 info.patch_version = mddev->patch_version;
1838 info.ctime = mddev->ctime;
1839 info.level = mddev->level;
1840 info.size = mddev->size;
1841 info.nr_disks = nr;
1842 info.raid_disks = mddev->raid_disks;
1843 info.md_minor = mddev->__minor;
1844 info.not_persistent= !mddev->persistent;
1846 info.utime = mddev->utime;
1847 info.state = mddev->state;
1848 info.active_disks = active;
1849 info.working_disks = working;
1850 info.failed_disks = failed;
1851 info.spare_disks = spare;
1853 info.layout = mddev->layout;
1854 info.chunk_size = mddev->chunk_size;
1856 if (copy_to_user(arg, &info, sizeof(info)))
1857 return -EFAULT;
1859 return 0;
1861 #undef SET_FROM_SB
1864 static int get_disk_info(mddev_t * mddev, void * arg)
1866 mdu_disk_info_t info;
1867 unsigned int nr;
1868 mdk_rdev_t *rdev;
1870 if (copy_from_user(&info, arg, sizeof(info)))
1871 return -EFAULT;
1873 nr = info.number;
1875 rdev = find_rdev_nr(mddev, nr);
1876 if (rdev) {
1877 info.major = MAJOR(rdev->bdev->bd_dev);
1878 info.minor = MINOR(rdev->bdev->bd_dev);
1879 info.raid_disk = rdev->raid_disk;
1880 info.state = 0;
1881 if (rdev->faulty)
1882 info.state |= (1<<MD_DISK_FAULTY);
1883 else if (rdev->in_sync) {
1884 info.state |= (1<<MD_DISK_ACTIVE);
1885 info.state |= (1<<MD_DISK_SYNC);
1887 } else {
1888 info.major = info.minor = 0;
1889 info.raid_disk = -1;
1890 info.state = (1<<MD_DISK_REMOVED);
1893 if (copy_to_user(arg, &info, sizeof(info)))
1894 return -EFAULT;
1896 return 0;
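/*
 * ADD_NEW_DISK: import the given device and bind it to the array.
 * For a not-yet-assembled array the device must carry a valid
 * superblock; otherwise the caller-supplied mdu_disk_info_t describes
 * the slot it should take.
 */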
1899 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
1901 sector_t size;
1902 mdk_rdev_t *rdev;
1903 dev_t dev;
1904 dev = MKDEV(info->major,info->minor);
1905 if (!mddev->raid_disks) {
1906 /* expecting a device which has a superblock */
1907 rdev = md_import_device(dev, 1);
1908 if (IS_ERR(rdev)) {
1909 printk(KERN_WARNING "md: md_import_device returned %ld\n", PTR_ERR(rdev));
1910 return PTR_ERR(rdev);
1912 if (!list_empty(&mddev->disks)) {
1913 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
1914 mdk_rdev_t, same_set);
1915 int err = super_90_load(rdev, NULL);
1916 if (err < 0) {
1917 printk(KERN_WARNING "md: %s has different UUID to %s\n",
1918 bdev_partition_name(rdev->bdev), bdev_partition_name(rdev0->bdev));
1919 export_rdev(rdev);
1920 return -EINVAL;
1923 bind_rdev_to_array(rdev, mddev);
1924 return 0;
1927 if (!(info->state & (1<<MD_DISK_FAULTY))) {
1928 rdev = md_import_device (dev, 0);
1929 if (IS_ERR(rdev)) {
1930 printk(KERN_WARNING "md: error, md_import_device() returned %ld\n", PTR_ERR(rdev));
1931 return PTR_ERR(rdev);
1933 rdev->desc_nr = info->number;
1934 if (info->raid_disk < mddev->raid_disks)
1935 rdev->raid_disk = info->raid_disk;
1936 else
1937 rdev->raid_disk = -1;
1939 rdev->faulty = 0;
1940 if (rdev->raid_disk < mddev->raid_disks)
1941 rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
1942 else
1943 rdev->in_sync = 0;
1945 bind_rdev_to_array(rdev, mddev);
1947 if (!mddev->persistent)
1948 printk(KERN_INFO "md: nonpersistent superblock ...\n");
1950 size = calc_dev_size(rdev->bdev, mddev);
1951 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
1953 if (!mddev->size || (mddev->size > size))
1954 mddev->size = size;
1957 return 0;
1960 static int hot_generate_error(mddev_t * mddev, dev_t dev)
1962 struct request_queue *q;
1963 mdk_rdev_t *rdev;
1965 if (!mddev->pers)
1966 return -ENODEV;
1968 printk(KERN_INFO "md: trying to generate %s error in md%d ... \n",
1969 partition_name(dev), mdidx(mddev));
1971 rdev = find_rdev(mddev, dev);
1972 if (!rdev) {
1973 MD_BUG();
1974 return -ENXIO;
1977 if (rdev->desc_nr == -1) {
1978 MD_BUG();
1979 return -EINVAL;
1981 if (!rdev->in_sync)
1982 return -ENODEV;
1984 q = bdev_get_queue(rdev->bdev);
1985 if (!q) {
1986 MD_BUG();
1987 return -ENODEV;
1989 printk(KERN_INFO "md: okay, generating error!\n");
1990 // q->oneshot_error = 1; // disabled for now
1992 return 0;
1995 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
1997 mdk_rdev_t *rdev;
1999 if (!mddev->pers)
2000 return -ENODEV;
2002 printk(KERN_INFO "md: trying to remove %s from md%d ... \n",
2003 partition_name(dev), mdidx(mddev));
2005 rdev = find_rdev(mddev, dev);
2006 if (!rdev)
2007 return -ENXIO;
2009 if (rdev->raid_disk >= 0)
2010 goto busy;
2012 kick_rdev_from_array(rdev);
2013 md_update_sb(mddev);
2015 return 0;
2016 busy:
2017 printk(KERN_WARNING "md: cannot remove active disk %s from md%d ... \n",
2018 bdev_partition_name(rdev->bdev), mdidx(mddev));
2019 return -EBUSY;
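/*
 * HOT_ADD_DISK: import a raw device, check its size against the
 * array, bind it as a spare and kick recovery so it can be rebuilt
 * onto immediately if needed.
 */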
2022 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2024 int i, err;
2025 unsigned int size;
2026 mdk_rdev_t *rdev;
2028 if (!mddev->pers)
2029 return -ENODEV;
2031 printk(KERN_INFO "md: trying to hot-add %s to md%d ... \n",
2032 partition_name(dev), mdidx(mddev));
2034 if (!mddev->pers->hot_add_disk) {
2035 printk(KERN_WARNING "md%d: personality does not support diskops!\n",
2036 mdidx(mddev));
2037 return -EINVAL;
2040 rdev = md_import_device (dev, 0);
2041 if (IS_ERR(rdev)) {
2042 printk(KERN_WARNING "md: error, md_import_device() returned %ld\n", PTR_ERR(rdev));
2043 return -EINVAL;
2046 size = calc_dev_size(rdev->bdev, mddev);
2048 if (size < mddev->size) {
2049 printk(KERN_WARNING "md%d: disk size %llu blocks < array size %llu\n",
2050 mdidx(mddev), (unsigned long long)size,
2051 (unsigned long long)mddev->size);
2052 err = -ENOSPC;
2053 goto abort_export;
2056 if (rdev->faulty) {
2057 printk(KERN_WARNING "md: can not hot-add faulty %s disk to md%d!\n",
2058 bdev_partition_name(rdev->bdev), mdidx(mddev));
2059 err = -EINVAL;
2060 goto abort_export;
2062 rdev->in_sync = 0;
2063 bind_rdev_to_array(rdev, mddev);
2066 * The rest should better be atomic, we can have disk failures
2067 * noticed in interrupt contexts ...
2069 rdev->size = size;
2070 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2072 for (i = mddev->raid_disks; i < mddev->max_disks; i++)
2073 if (find_rdev_nr(mddev,i)==NULL)
2074 break;
2076 if (i == mddev->max_disks) {
2077 printk(KERN_WARNING "md%d: can not hot-add to full array!\n",
2078 mdidx(mddev));
2079 err = -EBUSY;
2080 goto abort_unbind_export;
2083 rdev->desc_nr = i;
2084 rdev->raid_disk = -1;
2086 md_update_sb(mddev);
2089 * Kick recovery, maybe this spare has to be added to the
2090 * array immediately.
2092 md_recover_arrays();
2094 return 0;
2096 abort_unbind_export:
2097 unbind_rdev_from_array(rdev);
2099 abort_export:
2100 export_rdev(rdev);
2101 return err;
2104 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2107 mddev->major_version = MD_MAJOR_VERSION;
2108 mddev->minor_version = MD_MINOR_VERSION;
2109 mddev->patch_version = MD_PATCHLEVEL_VERSION;
2110 mddev->ctime = get_seconds();
2112 mddev->level = info->level;
2113 mddev->size = info->size;
2114 mddev->raid_disks = info->raid_disks;
2115 /* don't set __minor, it is determined by which /dev/md* was
2116 * opened
2118 mddev->state = info->state;
2119 mddev->persistent = ! info->not_persistent;
2121 mddev->layout = info->layout;
2122 mddev->chunk_size = info->chunk_size;
2127 * Generate a 128 bit UUID
2129 get_random_bytes(mddev->uuid, 16);
2131 return 0;
2134 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
2136 mdk_rdev_t *rdev;
2138 rdev = find_rdev(mddev, dev);
2139 if (!rdev)
2140 return 0;
2142 md_error(mddev, rdev);
2143 return 1;
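/*
 * Main ioctl entry point.  Commands that do not refer to a particular
 * array (RAID_VERSION, PRINT_RAID_DEBUG, RAID_AUTORUN) are handled
 * first; START_ARRAY is also handled without the per-array lock,
 * everything else runs under mddev_lock().
 */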
2146 static int md_ioctl(struct inode *inode, struct file *file,
2147 unsigned int cmd, unsigned long arg)
2149 unsigned int minor;
2150 int err = 0;
2151 struct hd_geometry *loc = (struct hd_geometry *) arg;
2152 mddev_t *mddev = NULL;
2153 kdev_t dev;
2155 if (!capable(CAP_SYS_ADMIN))
2156 return -EACCES;
2158 dev = inode->i_rdev;
2159 minor = minor(dev);
2160 if (minor >= MAX_MD_DEVS) {
2161 MD_BUG();
2162 return -EINVAL;
2166 * Commands dealing with the RAID driver but not any
2167 * particular array:
2169 switch (cmd)
2171 case RAID_VERSION:
2172 err = get_version((void *)arg);
2173 goto done;
2175 case PRINT_RAID_DEBUG:
2176 err = 0;
2177 md_print_devices();
2178 goto done;
2180 #ifndef MODULE
2181 case RAID_AUTORUN:
2182 err = 0;
2183 autostart_arrays();
2184 goto done;
2185 #endif
2186 default:;
2190 * Commands creating/starting a new array:
2193 mddev = inode->i_bdev->bd_inode->u.generic_ip;
2195 if (!mddev) {
2196 BUG();
2197 goto abort;
2201 if (cmd == START_ARRAY) {
2202 /* START_ARRAY doesn't need to lock the array as autostart_array
2203 * does the locking, and it could even be a different array
2205 err = autostart_array(arg);
2206 if (err) {
2207 printk(KERN_WARNING "md: autostart %s failed!\n",
2208 partition_name(arg));
2209 goto abort;
2211 goto done;
2214 err = mddev_lock(mddev);
2215 if (err) {
2216 printk(KERN_INFO "md: ioctl lock interrupted, reason %d, cmd %d\n",
2217 err, cmd);
2218 goto abort;
2221 switch (cmd)
2223 case SET_ARRAY_INFO:
2225 if (!list_empty(&mddev->disks)) {
2226 printk(KERN_WARNING "md: array md%d already has disks!\n",
2227 mdidx(mddev));
2228 err = -EBUSY;
2229 goto abort_unlock;
2231 if (mddev->raid_disks) {
2232 printk(KERN_WARNING "md: array md%d already initialised!\n",
2233 mdidx(mddev));
2234 err = -EBUSY;
2235 goto abort_unlock;
2237 if (arg) {
2238 mdu_array_info_t info;
2239 if (copy_from_user(&info, (void*)arg, sizeof(info))) {
2240 err = -EFAULT;
2241 goto abort_unlock;
2243 err = set_array_info(mddev, &info);
2244 if (err) {
2245 printk(KERN_WARNING "md: couldn't set array info. %d\n", err);
2246 goto abort_unlock;
2249 goto done_unlock;
2251 default:;
2255 * Commands querying/configuring an existing array:
2257 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY or RUN_ARRAY is allowed */
2258 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) {
2259 err = -ENODEV;
2260 goto abort_unlock;
2264 * Commands even a read-only array can execute:
2266 switch (cmd)
2268 case GET_ARRAY_INFO:
2269 err = get_array_info(mddev, (void *)arg);
2270 goto done_unlock;
2272 case GET_DISK_INFO:
2273 err = get_disk_info(mddev, (void *)arg);
2274 goto done_unlock;
2276 case RESTART_ARRAY_RW:
2277 err = restart_array(mddev);
2278 goto done_unlock;
2280 case STOP_ARRAY:
2281 err = do_md_stop (mddev, 0);
2282 goto done_unlock;
2284 case STOP_ARRAY_RO:
2285 err = do_md_stop (mddev, 1);
2286 goto done_unlock;
2289 * We have a problem here : there is no easy way to give a CHS
2290 * virtual geometry. We currently pretend that we have a 2 heads
2291 * 4 sectors (with a BIG number of cylinders...). This drives
2292 * dosfs just mad... ;-)
2294 case HDIO_GETGEO:
2295 if (!loc) {
2296 err = -EINVAL;
2297 goto abort_unlock;
2299 err = put_user (2, (char *) &loc->heads);
2300 if (err)
2301 goto abort_unlock;
2302 err = put_user (4, (char *) &loc->sectors);
2303 if (err)
2304 goto abort_unlock;
2305 err = put_user(get_capacity(disks[mdidx(mddev)])/8,
2306 (short *) &loc->cylinders);
2307 if (err)
2308 goto abort_unlock;
2309 err = put_user (get_start_sect(inode->i_bdev),
2310 (long *) &loc->start);
2311 goto done_unlock;
2315 * The remaining ioctls are changing the state of the
2316 * superblock, so we do not allow read-only arrays
2317 * here:
2319 if (mddev->ro) {
2320 err = -EROFS;
2321 goto abort_unlock;
2324 switch (cmd)
2326 case ADD_NEW_DISK:
2328 mdu_disk_info_t info;
2329 if (copy_from_user(&info, (void*)arg, sizeof(info)))
2330 err = -EFAULT;
2331 else
2332 err = add_new_disk(mddev, &info);
2333 goto done_unlock;
2335 case HOT_GENERATE_ERROR:
2336 err = hot_generate_error(mddev, arg);
2337 goto done_unlock;
2338 case HOT_REMOVE_DISK:
2339 err = hot_remove_disk(mddev, arg);
2340 goto done_unlock;
2342 case HOT_ADD_DISK:
2343 err = hot_add_disk(mddev, arg);
2344 goto done_unlock;
2346 case SET_DISK_FAULTY:
2347 err = set_disk_faulty(mddev, arg);
2348 goto done_unlock;
2350 case RUN_ARRAY:
2352 err = do_md_run (mddev);
2354 * we have to clean up the mess if
2355 * the array cannot be run for some
2356 * reason ...
2358 if (err) {
2359 mddev->sb_dirty = 0;
2360 do_md_stop (mddev, 0);
2362 goto done_unlock;
2365 default:
2366 if (_IOC_TYPE(cmd) == MD_MAJOR)
2367 printk(KERN_WARNING "md: %s(pid %d) used obsolete MD ioctl, "
2368 "upgrade your software to use new ictls.\n",
2369 current->comm, current->pid);
2370 err = -EINVAL;
2371 goto abort_unlock;
2374 done_unlock:
2375 abort_unlock:
2376 mddev_unlock(mddev);
2378 return err;
2379 done:
2380 if (err)
2381 MD_BUG();
2382 abort:
2383 return err;
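/*
 * Illustrative user-space sketch (not part of the driver; the ioctl
 * numbers and the mdu_array_info_t layout are assumed to come from
 * <linux/raid/md_u.h>): a management tool such as raidtools would
 * query an array roughly like this, error handling omitted:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int main(void)
 *	{
 *		mdu_array_info_t info;
 *		int fd = open("/dev/md0", O_RDONLY);
 *
 *		if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *			printf("level %d, %d raid disks\n",
 *			       info.level, info.raid_disks);
 *		return 0;
 *	}
 */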
2386 static int md_open(struct inode *inode, struct file *file)
2389 * Succeed if we can find or allocate a mddev structure.
2391 mddev_t *mddev = mddev_find(minor(inode->i_rdev));
2392 int err = -ENOMEM;
2394 if (!mddev)
2395 goto out;
2397 if ((err = mddev_lock(mddev)))
2398 goto put;
2400 err = 0;
2401 mddev_unlock(mddev);
2402 inode->i_bdev->bd_inode->u.generic_ip = mddev_get(mddev);
2403 put:
2404 mddev_put(mddev);
2405 out:
2406 return err;
2409 static int md_release(struct inode *inode, struct file * file)
2411 mddev_t *mddev = inode->i_bdev->bd_inode->u.generic_ip;
2413 if (!mddev)
2414 BUG();
2415 mddev_put(mddev);
2417 return 0;
2420 static struct block_device_operations md_fops =
2422 .owner = THIS_MODULE,
2423 .open = md_open,
2424 .release = md_release,
2425 .ioctl = md_ioctl,
2429 static inline void flush_curr_signals(void)
2431 spin_lock(&current->sig->siglock);
2432 flush_signals(current);
2433 spin_unlock(&current->sig->siglock);
2436 int md_thread(void * arg)
2438 mdk_thread_t *thread = arg;
2440 lock_kernel();
2443 * Detach thread
2446 daemonize();
2448 sprintf(current->comm, thread->name);
2449 current->exit_signal = SIGCHLD;
2450 siginitsetinv(&current->blocked, sigmask(SIGKILL));
2451 flush_curr_signals();
2452 thread->tsk = current;
2455 * md_thread is a 'system-thread'; its priority should be very
2456 * high. We avoid resource deadlocks individually in each
2457 * raid personality. (RAID5 does preallocation) We also use RR and
2458 * the very same RT priority as kswapd, thus we will never get
2459 * into a priority inversion deadlock.
2461 * we definitely have to have equal or higher priority than
2462 * bdflush, otherwise bdflush will deadlock if there are too
2463 * many dirty RAID5 blocks.
2465 unlock_kernel();
2467 complete(thread->event);
2468 while (thread->run) {
2469 void (*run)(void *data);
2471 wait_event_interruptible(thread->wqueue,
2472 test_bit(THREAD_WAKEUP, &thread->flags));
2474 clear_bit(THREAD_WAKEUP, &thread->flags);
2476 run = thread->run;
2477 if (run) {
2478 run(thread->data);
2479 blk_run_queues();
2481 if (signal_pending(current))
2482 flush_curr_signals();
2484 complete(thread->event);
2485 return 0;
2488 void md_wakeup_thread(mdk_thread_t *thread)
2490 dprintk("md: waking up MD thread %p.\n", thread);
2491 set_bit(THREAD_WAKEUP, &thread->flags);
2492 wake_up(&thread->wqueue);
2495 mdk_thread_t *md_register_thread(void (*run) (void *),
2496 void *data, const char *name)
2498 mdk_thread_t *thread;
2499 int ret;
2500 struct completion event;
2502 thread = (mdk_thread_t *) kmalloc
2503 (sizeof(mdk_thread_t), GFP_KERNEL);
2504 if (!thread)
2505 return NULL;
2507 memset(thread, 0, sizeof(mdk_thread_t));
2508 init_waitqueue_head(&thread->wqueue);
2510 init_completion(&event);
2511 thread->event = &event;
2512 thread->run = run;
2513 thread->data = data;
2514 thread->name = name;
2515 ret = kernel_thread(md_thread, thread, 0);
2516 if (ret < 0) {
2517 kfree(thread);
2518 return NULL;
2520 wait_for_completion(&event);
2521 return thread;
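/*
 * Typical usage, as seen later in this file: a per-array service thread
 * is created with
 *
 *	mddev->sync_thread = md_register_thread(md_do_sync, mddev, "md_resync");
 *
 * then woken with md_wakeup_thread() and torn down with
 * md_unregister_thread(); the completion above guarantees the new
 * thread has fully started before md_register_thread() returns.
 */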
2524 void md_interrupt_thread(mdk_thread_t *thread)
2526 if (!thread->tsk) {
2527 MD_BUG();
2528 return;
2530 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
2531 send_sig(SIGKILL, thread->tsk, 1);
2534 void md_unregister_thread(mdk_thread_t *thread)
2536 struct completion event;
2538 init_completion(&event);
2540 thread->event = &event;
2541 thread->run = NULL;
2542 thread->name = NULL;
2543 md_interrupt_thread(thread);
2544 wait_for_completion(&event);
2545 kfree(thread);
2548 static void md_recover_arrays(void)
2550 if (!md_recovery_thread) {
2551 MD_BUG();
2552 return;
2554 md_wakeup_thread(md_recovery_thread);
2558 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
2560 dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
2561 MD_MAJOR,mdidx(mddev),MAJOR(rdev->bdev->bd_dev),MINOR(rdev->bdev->bd_dev),
2562 __builtin_return_address(0),__builtin_return_address(1),
2563 __builtin_return_address(2),__builtin_return_address(3));
2565 if (!mddev) {
2566 MD_BUG();
2567 return;
2570 if (!rdev || rdev->faulty)
2571 return;
2572 if (!mddev->pers->error_handler)
2573 return;
2574 mddev->pers->error_handler(mddev,rdev);
2575 md_recover_arrays();
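/*
 * Usage note (an illustration, not taken verbatim from this file): RAID
 * personalities are expected to call md_error() from their I/O
 * completion paths when a member device fails, roughly
 *
 *	md_error(mddev, rdev);
 *
 * which lets the personality mark the device via ->error_handler() and
 * then kicks the recovery thread through md_recover_arrays().
 */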
2578 static int status_unused(char * page)
2580 int sz = 0, i = 0;
2581 mdk_rdev_t *rdev;
2582 struct list_head *tmp;
2584 sz += sprintf(page + sz, "unused devices: ");
2586 ITERATE_RDEV_PENDING(rdev,tmp) {
2587 i++;
2588 sz += sprintf(page + sz, "%s ",
2589 bdev_partition_name(rdev->bdev));
2591 if (!i)
2592 sz += sprintf(page + sz, "<none>");
2594 sz += sprintf(page + sz, "\n");
2595 return sz;
2599 static int status_resync(char * page, mddev_t * mddev)
2601 int sz = 0;
2602 unsigned long max_blocks, resync, res, dt, db, rt;
2604 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
2605 max_blocks = mddev->size;
2608 * Should not happen.
2610 if (!max_blocks) {
2611 MD_BUG();
2612 return 0;
2614 res = (resync/1024)*1000/(max_blocks/1024 + 1);
2616 int i, x = res/50, y = 20-x;
2617 sz += sprintf(page + sz, "[");
2618 for (i = 0; i < x; i++)
2619 sz += sprintf(page + sz, "=");
2620 sz += sprintf(page + sz, ">");
2621 for (i = 0; i < y; i++)
2622 sz += sprintf(page + sz, ".");
2623 sz += sprintf(page + sz, "] ");
2625 sz += sprintf(page + sz, " %s =%3lu.%lu%% (%lu/%lu)",
2626 (mddev->spares ? "recovery" : "resync"),
2627 res/10, res % 10, resync, max_blocks);
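/*
 * For illustration only (format reconstructed from the sprintf calls
 * above): a resync that is a bit under halfway through renders roughly
 * as
 *
 *	[========>............]  resync = 42.2% (433664/1024000)
 */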
2630 * We do not want to overflow, so the order of operands and
2631 * the * 100 / 100 trick are important. We do a +1 to be
2632 * safe against division by zero. We only estimate anyway.
2634 * dt: time from mark until now
2635 * db: blocks written from mark until now
2636 * rt: remaining time
2638 dt = ((jiffies - mddev->resync_mark) / HZ);
2639 if (!dt) dt++;
2640 db = resync - (mddev->resync_mark_cnt/2);
2641 rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
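/*
 * Worked example (illustrative numbers only): with dt = 100 seconds and
 * db = 50000 blocks written since the mark (about 500 KB/sec), and
 * 1000000 blocks still to go, rt = (100 * (1000000 / 501)) / 100
 * = 1996 seconds, printed below as "finish=33.2min".
 */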
2643 sz += sprintf(page + sz, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
2645 sz += sprintf(page + sz, " speed=%ldK/sec", db/dt);
2647 return sz;
2650 static int md_status_read_proc(char *page, char **start, off_t off,
2651 int count, int *eof, void *data)
2653 int sz = 0, j;
2654 sector_t size;
2655 struct list_head *tmp, *tmp2;
2656 mdk_rdev_t *rdev;
2657 mddev_t *mddev;
2659 sz += sprintf(page + sz, "Personalities : ");
2660 for (j = 0; j < MAX_PERSONALITY; j++)
2661 if (pers[j])
2662 sz += sprintf(page+sz, "[%s] ", pers[j]->name);
2664 sz += sprintf(page+sz, "\n");
2666 ITERATE_MDDEV(mddev,tmp) if (mddev_lock(mddev)==0) {
2667 sz += sprintf(page + sz, "md%d : %sactive", mdidx(mddev),
2668 mddev->pers ? "" : "in");
2669 if (mddev->pers) {
2670 if (mddev->ro)
2671 sz += sprintf(page + sz, " (read-only)");
2672 sz += sprintf(page + sz, " %s", mddev->pers->name);
2675 size = 0;
2676 ITERATE_RDEV(mddev,rdev,tmp2) {
2677 sz += sprintf(page + sz, " %s[%d]",
2678 bdev_partition_name(rdev->bdev), rdev->desc_nr);
2679 if (rdev->faulty) {
2680 sz += sprintf(page + sz, "(F)");
2681 continue;
2683 size += rdev->size;
2686 if (!list_empty(&mddev->disks)) {
2687 if (mddev->pers)
2688 sz += sprintf(page + sz, "\n %llu blocks",
2689 (unsigned long long)md_size[mdidx(mddev)]);
2690 else
2691 sz += sprintf(page + sz, "\n %llu blocks", (unsigned long long)size);
2694 if (!mddev->pers) {
2695 sz += sprintf(page+sz, "\n");
2696 mddev_unlock(mddev);
2697 continue;
2700 sz += mddev->pers->status (page+sz, mddev);
2702 sz += sprintf(page+sz, "\n ");
2703 if (mddev->curr_resync > 2)
2704 sz += status_resync (page+sz, mddev);
2705 else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
2706 sz += sprintf(page + sz, " resync=DELAYED");
2708 sz += sprintf(page + sz, "\n");
2709 mddev_unlock(mddev);
2711 sz += status_unused(page + sz);
2713 return sz;
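/*
 * For illustration only (assembled from the sprintf calls above, with
 * an assumed raid1 personality status of "[2/2] [UU]"): the resulting
 * /proc/mdstat output looks roughly like
 *
 *	Personalities : [raid1]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      1048576 blocks [2/2] [UU]
 *
 *	unused devices: <none>
 */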
2716 int register_md_personality(int pnum, mdk_personality_t *p)
2718 if (pnum >= MAX_PERSONALITY) {
2719 MD_BUG();
2720 return -EINVAL;
2723 if (pers[pnum]) {
2724 MD_BUG();
2725 return -EBUSY;
2728 pers[pnum] = p;
2729 printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
2730 return 0;
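/*
 * Illustrative usage (assumed from how the raid personality modules
 * normally wire themselves up; not shown in this file): a personality
 * registers itself at module init and unregisters at module exit,
 * roughly
 *
 *	register_md_personality(RAID1, &raid1_personality);
 *	...
 *	unregister_md_personality(RAID1);
 *
 * where the personality number must be below MAX_PERSONALITY.
 */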
2733 int unregister_md_personality(int pnum)
2735 if (pnum >= MAX_PERSONALITY) {
2736 MD_BUG();
2737 return -EINVAL;
2740 printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
2741 pers[pnum] = NULL;
2742 return 0;
2745 void md_sync_acct(mdk_rdev_t *rdev, unsigned long nr_sectors)
2747 rdev->bdev->bd_disk->sync_io += nr_sectors;
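/*
 * Descriptive note (added for clarity): is_mddev_idle() compares each
 * member disk's total read+write sector count, minus the resync I/O
 * accounted via md_sync_acct() above, with the value recorded on the
 * previous call; if any disk advanced by more than 32 sectors the array
 * is considered busy, and md_do_sync() throttles itself accordingly.
 */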
2750 static int is_mddev_idle(mddev_t *mddev)
2752 mdk_rdev_t * rdev;
2753 struct list_head *tmp;
2754 int idle;
2755 unsigned long curr_events;
2757 idle = 1;
2758 ITERATE_RDEV(mddev,rdev,tmp) {
2759 struct gendisk *disk = rdev->bdev->bd_disk;
2760 curr_events = disk->read_sectors + disk->write_sectors - disk->sync_io;
2761 if ((curr_events - rdev->last_events) > 32) {
2762 rdev->last_events = curr_events;
2763 idle = 0;
2766 return idle;
2769 void md_done_sync(mddev_t *mddev, int blocks, int ok)
2771 /* another "blocks" (512-byte) blocks have been synced */
2772 atomic_sub(blocks, &mddev->recovery_active);
2773 wake_up(&mddev->recovery_wait);
2774 if (!ok) {
2775 mddev->recovery_running = -EIO;
2776 md_recover_arrays();
2777 // stop recovery, signal do_sync ....
2782 DECLARE_WAIT_QUEUE_HEAD(resync_wait);
2784 #define SYNC_MARKS 10
2785 #define SYNC_MARK_STEP (3*HZ)
2786 static void md_do_sync(void *data)
2788 mddev_t *mddev = data;
2789 mddev_t *mddev2;
2790 unsigned int max_sectors, currspeed = 0,
2791 j, window, err;
2792 unsigned long mark[SYNC_MARKS];
2793 unsigned long mark_cnt[SYNC_MARKS];
2794 int last_mark,m;
2795 struct list_head *tmp;
2796 unsigned long last_check;
2798 /* just in case thread restarts... */
2799 if (mddev->recovery_running <= 0)
2800 return;
2802 /* we overload curr_resync somewhat here.
2803 * 0 == not engaged in resync at all
2804 * 2 == checking that there is no conflict with another sync
2805 * 1 == like 2, but have yielded to allow conflicting resync to
2806 * commence
2807 * other == active in resync - this many blocks
2809 do {
2810 mddev->curr_resync = 2;
2812 ITERATE_MDDEV(mddev2,tmp) {
2813 if (mddev2 == mddev)
2814 continue;
2815 if (mddev2->curr_resync &&
2816 match_mddev_units(mddev,mddev2)) {
2817 printk(KERN_INFO "md: delaying resync of md%d until md%d "
2818 "has finished resync (they share one or more physical units)\n",
2819 mdidx(mddev), mdidx(mddev2));
2820 if (mddev < mddev2) /* arbitrarily yield */
2821 mddev->curr_resync = 1;
2822 if (wait_event_interruptible(resync_wait,
2823 mddev2->curr_resync < 2)) {
2824 flush_curr_signals();
2825 err = -EINTR;
2826 mddev_put(mddev2);
2827 goto skip;
2831 } while (mddev->curr_resync < 2);
2833 max_sectors = mddev->size << 1;
2835 printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev));
2836 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed: %d KB/sec/disc.\n", sysctl_speed_limit_min);
2837 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
2838 "(but not more than %d KB/sec) for reconstruction.\n",
2839 sysctl_speed_limit_max);
2841 is_mddev_idle(mddev); /* this also initializes IO event counters */
2842 for (m = 0; m < SYNC_MARKS; m++) {
2843 mark[m] = jiffies;
2844 mark_cnt[m] = 0;
2846 last_mark = 0;
2847 mddev->resync_mark = mark[last_mark];
2848 mddev->resync_mark_cnt = mark_cnt[last_mark];
2851 * Tune reconstruction:
2853 window = 32*(PAGE_SIZE/512);
2854 printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n",
2855 window/2,max_sectors/2);
2857 atomic_set(&mddev->recovery_active, 0);
2858 init_waitqueue_head(&mddev->recovery_wait);
2859 last_check = 0;
2860 for (j = 0; j < max_sectors;) {
2861 int sectors;
2863 sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
2864 if (sectors < 0) {
2865 err = sectors;
2866 goto out;
2868 atomic_add(sectors, &mddev->recovery_active);
2869 j += sectors;
2870 if (j>1) mddev->curr_resync = j;
2872 if (last_check + window > j)
2873 continue;
2875 last_check = j;
2877 blk_run_queues();
2879 repeat:
2880 if (jiffies >= mark[last_mark] + SYNC_MARK_STEP ) {
2881 /* step marks */
2882 int next = (last_mark+1) % SYNC_MARKS;
2884 mddev->resync_mark = mark[next];
2885 mddev->resync_mark_cnt = mark_cnt[next];
2886 mark[next] = jiffies;
2887 mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
2888 last_mark = next;
2892 if (signal_pending(current)) {
2894 * got a signal, exit.
2896 printk(KERN_INFO "md: md_do_sync() got signal ... exiting\n");
2897 flush_curr_signals();
2898 err = -EINTR;
2899 goto out;
2903 * this loop exits only when we are slower than
2904 * the 'hard' speed limit, or the system was IO-idle for
2905 * a jiffy.
2906 * the system might be non-idle CPU-wise, but we only care
2907 * about not overloading the IO subsystem. (things like an
2908 * e2fsck being done on the RAID array should execute fast)
2910 cond_resched();
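/*
 * Descriptive note (added for clarity): currspeed below is the average
 * resync rate since the last mark, in KB/sec: sectors completed since
 * resync_mark_cnt, halved to 1K blocks, divided by the elapsed seconds
 * (the +1 in the denominator avoids dividing by zero).
 */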
2912 currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
2914 if (currspeed > sysctl_speed_limit_min) {
2915 if ((currspeed > sysctl_speed_limit_max) ||
2916 !is_mddev_idle(mddev)) {
2917 current->state = TASK_INTERRUPTIBLE;
2918 schedule_timeout(HZ/4);
2919 goto repeat;
2923 printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev));
2924 err = 0;
2926 * this also signals 'finished resyncing' to md_stop
2928 out:
2929 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
2930 /* tell personality that we are finished */
2931 mddev->pers->sync_request(mddev, max_sectors, 1);
2932 skip:
2933 mddev->curr_resync = 0;
2934 if (err)
2935 mddev->recovery_running = err;
2936 if (mddev->recovery_running > 0)
2937 mddev->recovery_running = 0;
2938 if (mddev->recovery_running == 0)
2939 mddev->in_sync = 1;
2940 md_recover_arrays();
2945 * This is the kernel thread that watches all md arrays for re-sync and other
2946 * action that might be needed.
2947 * It does not do any resync itself, but rather "forks" off other threads
2948 * to do that as needed.
2949 * When it is determined that resync is needed, we set "->recovery_running" and
2950 * create a thread at ->sync_thread.
2951 * When the thread finishes it clears recovery_running (or sets an error)
2952 * and wakes up this thread which will reap the thread and finish up.
2953 * This thread also removes any faulty devices (with nr_pending == 0).
2955 * The overall approach is:
2956 * 1/ if the superblock needs updating, update it.
2957 * 2/ If a recovery thread is running, don't do anything else.
2958 * 3/ If recovery has finished, clean up, possibly marking spares active.
2959 * 4/ If there are any faulty devices, remove them.
2960 * 5/ If array is degraded, try to add spare devices.
2961 * 6/ If array has spares or is not in-sync, start a resync thread.
2963 void md_do_recovery(void *data)
2965 mddev_t *mddev;
2966 mdk_rdev_t *rdev;
2967 struct list_head *tmp, *rtmp;
2970 dprintk(KERN_INFO "md: recovery thread got woken up ...\n");
2972 ITERATE_MDDEV(mddev,tmp) if (mddev_lock(mddev)==0) {
2973 if (!mddev->raid_disks || !mddev->pers || mddev->ro)
2974 goto unlock;
2975 if (mddev->sb_dirty)
2976 md_update_sb(mddev);
2977 if (mddev->recovery_running > 0)
2978 /* resync/recovery still happening */
2979 goto unlock;
2980 if (mddev->sync_thread) {
2981 /* resync has finished, collect result */
2982 md_unregister_thread(mddev->sync_thread);
2983 mddev->sync_thread = NULL;
2984 if (mddev->recovery_running == 0) {
2985 /* success...*/
2986 /* activate any spares */
2987 mddev->pers->spare_active(mddev);
2988 mddev->spares = 0;
2990 md_update_sb(mddev);
2991 mddev->recovery_running = 0;
2992 wake_up(&resync_wait);
2993 goto unlock;
2995 if (mddev->recovery_running) {
2996 /* that's odd.. */
2997 mddev->recovery_running = 0;
2998 wake_up(&resync_wait);
3001 /* no recovery is running.
3002 * remove any failed drives, then
3003 * add spares if possible
3005 mddev->spares = 0;
3006 ITERATE_RDEV(mddev,rdev,rtmp) {
3007 if (rdev->raid_disk >= 0 &&
3008 rdev->faulty &&
3009 atomic_read(&rdev->nr_pending)==0) {
3010 mddev->pers->hot_remove_disk(mddev, rdev->raid_disk);
3011 rdev->raid_disk = -1;
3013 if (!rdev->faulty && rdev->raid_disk >= 0 && !rdev->in_sync)
3014 mddev->spares++;
3016 if (mddev->degraded) {
3017 ITERATE_RDEV(mddev,rdev,rtmp)
3018 if (rdev->raid_disk < 0
3019 && !rdev->faulty) {
3020 if (mddev->pers->hot_add_disk(mddev,rdev))
3021 mddev->spares++;
3022 else
3023 break;
3027 if (!mddev->spares && mddev->in_sync) {
3028 /* nothing we can do ... */
3029 goto unlock;
3031 if (mddev->pers->sync_request) {
3032 mddev->sync_thread = md_register_thread(md_do_sync,
3033 mddev,
3034 "md_resync");
3035 if (!mddev->sync_thread) {
3036 printk(KERN_ERR "md%d: could not start resync thread...\n", mdidx(mddev));
3037 /* leave the spares where they are, it shouldn't hurt */
3038 mddev->recovery_running = 0;
3039 } else {
3040 mddev->recovery_running = 1;
3041 md_wakeup_thread(mddev->sync_thread);
3044 unlock:
3045 mddev_unlock(mddev);
3047 dprintk(KERN_INFO "md: recovery thread finished ...\n");
3051 int md_notify_reboot(struct notifier_block *this,
3052 unsigned long code, void *x)
3054 struct list_head *tmp;
3055 mddev_t *mddev;
3057 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
3059 printk(KERN_INFO "md: stopping all md devices.\n");
3061 ITERATE_MDDEV(mddev,tmp)
3062 if (mddev_trylock(mddev)==0)
3063 do_md_stop (mddev, 1);
3065 * certain more exotic SCSI devices are known to be
3066 * volatile wrt too early system reboots. While the
3067 * right place to handle this issue is the given
3068 * driver, we do want to have a safe RAID driver ...
3070 mdelay(1000*1);
3072 return NOTIFY_DONE;
3075 struct notifier_block md_notifier = {
3076 .notifier_call = md_notify_reboot,
3077 .next = NULL,
3078 .priority = INT_MAX, /* before any real devices */
3081 static void md_geninit(void)
3083 int i;
3085 for(i = 0; i < MAX_MD_DEVS; i++) {
3086 md_size[i] = 0;
3089 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
3091 #ifdef CONFIG_PROC_FS
3092 create_proc_read_entry("mdstat", 0, NULL, md_status_read_proc, NULL);
3093 #endif
3096 int __init md_init(void)
3098 static char * name = "mdrecoveryd";
3099 int minor;
3101 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d, MD_SB_DISKS=%d\n",
3102 MD_MAJOR_VERSION, MD_MINOR_VERSION,
3103 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
3105 if (register_blkdev (MAJOR_NR, "md", &md_fops)) {
3106 printk(KERN_ALERT "md: Unable to get major %d for md\n", MAJOR_NR);
3107 return (-1);
3109 devfs_handle = devfs_mk_dir (NULL, "md", NULL);
3110 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
3111 md_probe, NULL, NULL);
3112 for (minor=0; minor < MAX_MD_DEVS; ++minor) {
3113 char devname[128];
3114 sprintf (devname, "%u", minor);
3115 devfs_register (devfs_handle,
3116 devname, DEVFS_FL_DEFAULT, MAJOR_NR, minor,
3117 S_IFBLK | S_IRUSR | S_IWUSR, &md_fops, NULL);
3120 md_recovery_thread = md_register_thread(md_do_recovery, NULL, name);
3121 if (!md_recovery_thread)
3122 printk(KERN_ALERT
3123 "md: bug: couldn't allocate md_recovery_thread\n");
3125 register_reboot_notifier(&md_notifier);
3126 raid_table_header = register_sysctl_table(raid_root_table, 1);
3128 md_geninit();
3129 return (0);
3133 #ifndef MODULE
3136 * Searches all registered partitions for autorun RAID arrays
3137 * at boot time.
3139 static dev_t detected_devices[128];
3140 static int dev_cnt;
3142 void md_autodetect_dev(dev_t dev)
3144 if (dev_cnt >= 0 && dev_cnt < 127)
3145 detected_devices[dev_cnt++] = dev;
3149 static void autostart_arrays(void)
3151 mdk_rdev_t *rdev;
3152 int i;
3154 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
3156 for (i = 0; i < dev_cnt; i++) {
3157 dev_t dev = detected_devices[i];
3159 rdev = md_import_device(dev,1);
3160 if (IS_ERR(rdev)) {
3161 printk(KERN_ALERT "md: could not import %s!\n",
3162 partition_name(dev));
3163 continue;
3165 if (rdev->faulty) {
3166 MD_BUG();
3167 continue;
3169 list_add(&rdev->same_set, &pending_raid_disks);
3171 dev_cnt = 0;
3173 autorun_devices();
3176 #endif
3178 static __exit void md_exit(void)
3180 int i;
3181 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
3182 md_unregister_thread(md_recovery_thread);
3183 devfs_unregister(devfs_handle);
3185 unregister_blkdev(MAJOR_NR,"md");
3186 unregister_reboot_notifier(&md_notifier);
3187 unregister_sysctl_table(raid_table_header);
3188 #ifdef CONFIG_PROC_FS
3189 remove_proc_entry("mdstat", NULL);
3190 #endif
3191 for (i = 0; i < MAX_MD_DEVS; i++) {
3192 struct gendisk *disk = disks[i];
3193 mddev_t *mddev;
3194 if (!disks[i])
3195 continue;
3196 mddev = disk->private_data;
3197 del_gendisk(disk);
3198 put_disk(disk);
3199 mddev_put(mddev);
3203 module_init(md_init)
3204 module_exit(md_exit)
3206 EXPORT_SYMBOL(md_size);
3207 EXPORT_SYMBOL(register_md_personality);
3208 EXPORT_SYMBOL(unregister_md_personality);
3209 EXPORT_SYMBOL(md_error);
3210 EXPORT_SYMBOL(md_sync_acct);
3211 EXPORT_SYMBOL(md_done_sync);
3212 EXPORT_SYMBOL(md_register_thread);
3213 EXPORT_SYMBOL(md_unregister_thread);
3214 EXPORT_SYMBOL(md_wakeup_thread);
3215 EXPORT_SYMBOL(md_print_devices);
3216 EXPORT_SYMBOL(md_interrupt_thread);
3217 MODULE_LICENSE("GPL");