drivers/md/dm-raid1.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1

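/*
 * Initialise a region hash for a mirror set: size the bucket array
 * as a power of two derived from nr_regions (with a floor of 64),
 * and back region allocation with a mempool of MIN_REGIONS entries
 * so regions can still be created under memory pressure.
 */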
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

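/*
 * Multiplicative hash: the constant appears to be a large odd number
 * in the style of Knuth's multiplicative hashing, so the multiply
 * scrambles the region number before the shift and mask pick a bucket.
 */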
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

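/*
 * Called with hash_lock held for read.  Drops that lock to allocate
 * and query the dirty log, then retakes it in write mode; another
 * thread may insert the same region in the meantime, hence the second
 * lookup and the "lost the race" path below.
 */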
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

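/*
 * Report the state of a region without creating a hash entry for it.
 * Regions absent from the hash are classed clean or nosync according
 * to the dirty log; if the log cannot answer without blocking and
 * may_block is not set, the region is conservatively treated as
 * nosync.
 */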
static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

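/*
 * Splice the clean, recovered and failed_recovered lists out from
 * under the locks, then finish each region off lock-free: update the
 * log state, release any delayed io and free the entries.
 */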
static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice(&rh->failed_recovered_regions, &failed_recovered);
		INIT_LIST_HEAD(&rh->failed_recovered_regions);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}

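/*
 * A write is about to hit this region: bump the pending count and,
 * if the region was clean, mark it dirty in the log before the io is
 * allowed through.
 */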
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

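/*
 * The recovery_count semaphore (initialised to 0, raised by
 * rh_start_recovery) throttles recovery to at most MAX_RECOVERY
 * regions in flight; recovery_in_flight tracks the same work so
 * suspend can wait for it to drain.
 */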
static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	if (read_err)
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");

	if (write_err)
		DMERR_LIMIT("Write error during recovery (error = 0x%x)",
			    write_err);

	rh_recovery_end(reg, !(read_err || write_err));
}

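/*
 * Hand one region to kcopyd: read from the default mirror, write to
 * every other mirror.  The final region of the device may be shorter
 * than region_size, so its length is derived from ti->len.
 */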
static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm_io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, 0);
}

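/*
 * Issue one dm_io request that covers every mirror; write_callback
 * runs once all legs have completed.  The bio's pages are handed
 * through directly as a bvec, so no data is copied.
 */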
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure))
		while ((bio = bio_list_pop(&sync)))
			bio_endio(bio, -EIO);
	else while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
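/*
 * The workqueue handler: snapshot the read and write lists under
 * ms->lock, then process region state changes, recovery, reads and
 * writes, in that order.
 */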
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

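/*
 * Parse the optional trailing feature arguments:
 * [#features <feature>...].  "handle_errors" is currently the only
 * recognised feature.
 */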
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
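/*
 * For example, a hypothetical two-way mirror with a core log and
 * 1024-sector regions might be set up with a table line like:
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0
 *
 * (device names and sizes here are illustrative only).
 */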
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wake(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

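/*
 * Add a bio to the appropriate deferred list and kick kmirrord if
 * the list was previously empty (a non-empty list means a wakeup is
 * already pending).
 */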
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu 0 ",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");