/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "multipath.h"

#define MAX_WORK_PER_DISK	128

#define NR_RESERVED_BUFS	32
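
/*
 * Pick a path for a new request: scan the paths under rcu_read_lock()
 * and take the first device marked In_sync, elevating its nr_pending
 * count.  Returns the path number, or -1 if no path is operational.
 */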
static int multipath_map (multipath_conf_t *conf)
{
        int i, disks = conf->raid_disks;

        /*
         * Later we do read balancing on the read side
         * now we use the first available disk.
         */

        rcu_read_lock();
        for (i = 0; i < disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
                if (rdev && test_bit(In_sync, &rdev->flags)) {
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        return i;
                }
        }
        rcu_read_unlock();

        printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
        return (-1);
}
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
        unsigned long flags;
        mddev_t *mddev = mp_bh->mddev;
        multipath_conf_t *conf = mddev->private;

        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&mp_bh->retry_list, &conf->retry_list);
        spin_unlock_irqrestore(&conf->device_lock, flags);
        md_wakeup_thread(mddev->thread);
}

/*
 * multipath_end_bh_io() is called when we have finished servicing a
 * multipathed operation and are ready to return a success/failure code
 * to the buffer cache layer.
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
        struct bio *bio = mp_bh->master_bio;
        multipath_conf_t *conf = mp_bh->mddev->private;

        bio_endio(bio, err);
        mempool_free(mp_bh, conf->pool);
}
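
/*
 * Completion handler for bios sent to a component device.  Successful
 * IO completes the master bio at once; a failure on a normal request
 * fails the path via md_error() and lets multipathd retry elsewhere,
 * while a failed read-ahead is simply completed with the error.
 */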
static void multipath_end_request(struct bio *bio, int error)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct multipath_bh *mp_bh = bio->bi_private;
        multipath_conf_t *conf = mp_bh->mddev->private;
        mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;

        if (uptodate)
                multipath_end_bh_io(mp_bh, 0);
        else if (!(bio->bi_rw & REQ_RAHEAD)) {
                /*
                 * oops, IO error:
                 */
                char b[BDEVNAME_SIZE];
                md_error (mp_bh->mddev, rdev);
                printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
                       bdevname(rdev->bdev, b),
                       (unsigned long long)bio->bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
                multipath_end_bh_io(mp_bh, error);
        rdev_dec_pending(rdev, conf->mddev);
}
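
/*
 * Unplug the request queue of every active component device with
 * pending IO so that queued requests are dispatched.
 */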
static void unplug_slaves(mddev_t *mddev)
{
        multipath_conf_t *conf = mddev->private;
        int i;

        rcu_read_lock();
        for (i = 0; i < mddev->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)
                    && atomic_read(&rdev->nr_pending)) {
                        struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();

                        blk_unplug(r_queue);

                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
                }
        }
        rcu_read_unlock();
}

static void multipath_unplug(struct request_queue *q)
{
        unplug_slaves(q->queuedata);
}
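
/*
 * Entry point for IO to the array: a private copy of the bio is aimed
 * at the device of the path chosen by multipath_map() and marked
 * REQ_FAILFAST_TRANSPORT, so transport errors come back quickly and
 * can be retried on another path.
 */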
static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
        multipath_conf_t *conf = mddev->private;
        struct multipath_bh * mp_bh;
        struct multipath_info *multipath;

        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return 0;
        }

        mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

        mp_bh->master_bio = bio;
        mp_bh->mddev = mddev;

        mp_bh->path = multipath_map(conf);
        if (mp_bh->path < 0) {
                bio_endio(bio, -EIO);
                mempool_free(mp_bh, conf->pool);
                return 0;
        }
        multipath = conf->multipaths + mp_bh->path;

        mp_bh->bio = *bio;
        mp_bh->bio.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
        mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        generic_make_request(&mp_bh->bio);
        return 0;
}
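
/*
 * /proc/mdstat status: "[<raid_disks>/<working_disks>] [" followed by
 * one 'U' (usable) or '_' (failed or missing) per configured path.
 */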
static void multipath_status (struct seq_file *seq, mddev_t *mddev)
{
        multipath_conf_t *conf = mddev->private;
        int i;

        seq_printf (seq, " [%d/%d] [", conf->raid_disks,
                    conf->working_disks);
        for (i = 0; i < conf->raid_disks; i++)
                seq_printf (seq, "%s",
                            conf->multipaths[i].rdev &&
                            test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
        seq_printf (seq, "]");
}
static int multipath_congested(void *data, int bits)
{
        mddev_t *mddev = data;
        multipath_conf_t *conf = mddev->private;
        int i, ret = 0;

        if (mddev_congested(mddev, bits))
                return 1;

        rcu_read_lock();
        for (i = 0; i < mddev->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);

                        ret |= bdi_congested(&q->backing_dev_info, bits);
                        /* Just like multipath_map, we just check the
                         * first available device
                         */
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
{
        multipath_conf_t *conf = mddev->private;

        if (conf->working_disks <= 1) {
                /*
                 * Uh oh, we can do nothing if this is our last path, but
                 * first check if this is a queued request for a device
                 * which has just failed.
                 */
                printk(KERN_ALERT
                       "multipath: only one IO path left and IO error.\n");
                /* leave it active... it's all we have */
        } else {
                /*
                 * Mark disk as unusable
                 */
                if (!test_bit(Faulty, &rdev->flags)) {
                        char b[BDEVNAME_SIZE];
                        clear_bit(In_sync, &rdev->flags);
                        set_bit(Faulty, &rdev->flags);
                        set_bit(MD_CHANGE_DEVS, &mddev->flags);
                        conf->working_disks--;
                        mddev->degraded++;
                        printk(KERN_ALERT "multipath: IO failure on %s,"
                               " disabling IO path.\n"
                               "multipath: Operation continuing"
                               " on %d IO paths.\n",
                               bdevname(rdev->bdev, b),
                               conf->working_disks);
                }
        }
}

static void print_multipath_conf (multipath_conf_t *conf)
{
        int i;
        struct multipath_info *tmp;

        printk("MULTIPATH conf printout:\n");
        if (!conf) {
                printk("(conf==NULL)\n");
                return;
        }
        printk(" --- wd:%d rd:%d\n", conf->working_disks,
               conf->raid_disks);

        for (i = 0; i < conf->raid_disks; i++) {
                char b[BDEVNAME_SIZE];
                tmp = conf->multipaths + i;
                if (tmp->rdev)
                        printk(" disk%d, o:%d, dev:%s\n",
                               i, !test_bit(Faulty, &tmp->rdev->flags),
                               bdevname(tmp->rdev->bdev, b));
        }
}
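
/*
 * Hot-add a spare as a new path: it goes into the first free slot (or
 * the slot recorded in rdev->raid_disk) and is published to readers
 * with rcu_assign_pointer().
 */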
static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
        multipath_conf_t *conf = mddev->private;
        struct request_queue *q;
        int err = -EEXIST;
        int path;
        struct multipath_info *p;
        int first = 0;
        int last = mddev->raid_disks - 1;

        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;

        print_multipath_conf(conf);

        for (path = first; path <= last; path++)
                if ((p = conf->multipaths + path)->rdev == NULL) {
                        q = rdev->bdev->bd_disk->queue;
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);

                        /* as we don't honour merge_bvec_fn, we must never risk
                         * violating it, so limit ->max_segments to one, lying
                         * within a single page.
                         * (Note: it is very unlikely that a device with
                         * merge_bvec_fn will be involved in multipath.)
                         */
                        if (q->merge_bvec_fn) {
                                blk_queue_max_segments(mddev->queue, 1);
                                blk_queue_segment_boundary(mddev->queue,
                                                           PAGE_CACHE_SIZE - 1);
                        }

                        conf->working_disks++;
                        mddev->degraded--;
                        rdev->raid_disk = path;
                        set_bit(In_sync, &rdev->flags);
                        rcu_assign_pointer(p->rdev, rdev);
                        err = 0;
                        md_integrity_add_rdev(rdev, mddev);
                        break;
                }

        print_multipath_conf(conf);

        return err;
}
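
/*
 * Hot-remove a path: refused while the path is In_sync or has pending
 * IO; the synchronize_rcu() before the final nr_pending check ensures
 * no multipath_map() reader still holds a reference to the old rdev.
 */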
static int multipath_remove_disk(mddev_t *mddev, int number)
{
        multipath_conf_t *conf = mddev->private;
        int err = 0;
        mdk_rdev_t *rdev;
        struct multipath_info *p = conf->multipaths + number;

        print_multipath_conf(conf);

        rdev = p->rdev;
        if (rdev) {
                if (test_bit(In_sync, &rdev->flags) ||
                    atomic_read(&rdev->nr_pending)) {
                        printk(KERN_ERR "hot-remove-disk, slot %d is identified"
                               " but is still operational!\n", number);
                        err = -EBUSY;
                        goto abort;
                }
                p->rdev = NULL;
                synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
                        p->rdev = rdev;
                        goto abort;
                }
                md_integrity_register(mddev);
        }
abort:

        print_multipath_conf(conf);
        return err;
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working multipaths.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */
static void multipathd (mddev_t *mddev)
{
        struct multipath_bh *mp_bh;
        struct bio *bio;
        unsigned long flags;
        multipath_conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;

        md_check_recovery(mddev);
        for (;;) {
                char b[BDEVNAME_SIZE];
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head))
                        break;
                mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
                list_del(head->prev);
                spin_unlock_irqrestore(&conf->device_lock, flags);

                bio = &mp_bh->bio;
                bio->bi_sector = mp_bh->master_bio->bi_sector;

                if ((mp_bh->path = multipath_map (conf)) < 0) {
                        printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
                               " error for block %llu\n",
                               bdevname(bio->bi_bdev, b),
                               (unsigned long long)bio->bi_sector);
                        multipath_end_bh_io(mp_bh, -EIO);
                } else {
                        printk(KERN_ERR "multipath: %s: redirecting sector %llu"
                               " to another IO path\n",
                               bdevname(bio->bi_bdev, b),
                               (unsigned long long)bio->bi_sector);
                        *bio = *(mp_bh->master_bio);
                        bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
                        bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
                        bio->bi_private = mp_bh;
                        generic_make_request(bio);
                }
        }
        spin_unlock_irqrestore(&conf->device_lock, flags);
}

static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        return mddev->dev_sectors;
}
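
/*
 * Array start-up: allocate the private conf and per-path table, count
 * the working paths, create the multipath_bh mempool and start the
 * multipathd thread.
 */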
static int multipath_run (mddev_t *mddev)
{
        multipath_conf_t *conf;
        int disk_idx;
        struct multipath_info *disk;
        mdk_rdev_t *rdev;

        if (md_check_no_bitmap(mddev))
                return -EINVAL;

        if (mddev->level != LEVEL_MULTIPATH) {
                printk("multipath: %s: raid level not set to multipath IO (%d)\n",
                       mdname(mddev), mddev->level);
                goto out;
        }
        /*
         * copy the already verified devices into our private MULTIPATH
         * bookkeeping area. [whatever we allocate in multipath_run(),
         * should be freed in multipath_stop()]
         */
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;

        conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
        mddev->private = conf;
        if (!conf) {
                printk(KERN_ERR
                       "multipath: couldn't allocate memory for %s\n",
                       mdname(mddev));
                goto out;
        }

        conf->multipaths = kzalloc(sizeof(struct multipath_info) * mddev->raid_disks,
                                   GFP_KERNEL);
        if (!conf->multipaths) {
                printk(KERN_ERR
                       "multipath: couldn't allocate memory for %s\n",
                       mdname(mddev));
                goto out_free_conf;
        }

        conf->working_disks = 0;
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                disk_idx = rdev->raid_disk;
                if (disk_idx < 0 ||
                    disk_idx >= mddev->raid_disks)
                        continue;

                disk = conf->multipaths + disk_idx;
                disk->rdev = rdev;
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);

                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, not that we ever expect a device with
                 * a merge_bvec_fn to be involved in multipath */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
                        blk_queue_max_segments(mddev->queue, 1);
                        blk_queue_segment_boundary(mddev->queue,
                                                   PAGE_CACHE_SIZE - 1);
                }

                if (!test_bit(Faulty, &rdev->flags))
                        conf->working_disks++;
        }

        conf->raid_disks = mddev->raid_disks;
        conf->mddev = mddev;
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);

        if (!conf->working_disks) {
                printk(KERN_ERR "multipath: no operational IO paths for %s\n",
                       mdname(mddev));
                goto out_free_conf;
        }
        mddev->degraded = conf->raid_disks - conf->working_disks;

        conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
                                                 sizeof(struct multipath_bh));
        if (conf->pool == NULL) {
                printk(KERN_ERR
                       "multipath: couldn't allocate memory for %s\n",
                       mdname(mddev));
                goto out_free_conf;
        }

        mddev->thread = md_register_thread(multipathd, mddev, NULL);
        if (!mddev->thread) {
                printk(KERN_ERR "multipath: couldn't allocate thread"
                       " for %s\n", mdname(mddev));
                goto out_free_conf;
        }

        printk(KERN_INFO
               "multipath: array %s active with %d out of %d IO paths\n",
               mdname(mddev), conf->working_disks, mddev->raid_disks);
        /*
         * Ok, everything is just fine now
         */
        md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

        mddev->queue->unplug_fn = multipath_unplug;
        mddev->queue->backing_dev_info.congested_fn = multipath_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
        md_integrity_register(mddev);
        return 0;

out_free_conf:
        if (conf->pool)
                mempool_destroy(conf->pool);
        kfree(conf->multipaths);
        kfree(conf);
        mddev->private = NULL;
out:
        return -EIO;
}
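
/*
 * Stop the retry thread and quiesce the queue before freeing
 * everything multipath_run() allocated.
 */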
static int multipath_stop (mddev_t *mddev)
{
        multipath_conf_t *conf = mddev->private;

        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        mempool_destroy(conf->pool);
        kfree(conf->multipaths);
        kfree(conf);
        mddev->private = NULL;
        return 0;
}

static struct mdk_personality multipath_personality =
{
        .name		= "multipath",
        .level		= LEVEL_MULTIPATH,
        .owner		= THIS_MODULE,
        .make_request	= multipath_make_request,
        .run		= multipath_run,
        .stop		= multipath_stop,
        .status		= multipath_status,
        .error_handler	= multipath_error,
        .hot_add_disk	= multipath_add_disk,
        .hot_remove_disk = multipath_remove_disk,
        .size		= multipath_size,
};

static int __init multipath_init (void)
{
        return register_md_personality (&multipath_personality);
}

static void __exit multipath_exit (void)
{
        unregister_md_personality (&multipath_personality);
}

module_init(multipath_init);
module_exit(multipath_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");