/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>
#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'bhs_delayed' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
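/*
 * For illustration, a sketch of the state transitions implemented by
 * the helpers below (rh_inc, rh_dec, __rh_recovery_prepare,
 * rh_recovery_end):
 *
 *   RH_CLEAN -- first write (rh_inc) --> RH_DIRTY
 *   RH_DIRTY -- last pending io completes (rh_dec) --> RH_CLEAN
 *   region picked for resync (__rh_recovery_prepare) --> RH_RECOVERING
 *   RH_RECOVERING -- kcopyd success --> recovered_regions list
 *   RH_RECOVERING -- kcopyd failure --> RH_NOSYNC, failed_recovered_regions
 */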
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};
enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};
/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
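/*
 * For illustration: with a region_size of 1024 sectors, region_shift is
 * ffs(1024) - 1 = 10, so a bio 5000 sectors past ti->begin lands in
 * region 5000 >> 10 = 4, whose first sector is 4 << 10 = 4096.
 */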
static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}
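/*
 * For illustration: RH_HASH_MULT is a large odd constant in the spirit
 * of Knuth-style multiplicative hashing; the >> 12 shift discards the
 * low-order bits of the product before the bucket mask is applied, so
 * consecutive region keys spread across the bucket lists instead of
 * clustering in a single bucket.
 */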
static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}
static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}
static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}
static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}
static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}
static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}
static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}
static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice(&rh->failed_recovered_regions, &failed_recovered);
		INIT_LIST_HEAD(&rh->failed_recovered_regions);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}
static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}
static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}
static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}
/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}
static int rh_flush(struct region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}
static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}
/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	if (read_err)
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");

	if (write_err)
		DMERR_LIMIT("Write error during recovery (error = 0x%x)",
			    write_err);

	rh_recovery_end(reg, !(read_err || write_err));
}
static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}
static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}
/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}
/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC: 	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
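/*
 * Concretely, do_writes() below sorts each incoming write onto one of
 * three local lists and then dispatches them: the sync list goes
 * through do_write() (a single dm_io issued to every mirror), the
 * recover list is parked with rh_delay() until rh_update_states()
 * re-issues those bios, and the nosync list is remapped to the
 * default mirror only.
 */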
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, 0);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure))
		while ((bio = bio_list_pop(&sync)))
			bio_endio(bio, -EIO);
	else while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}
/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}
static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
		 size > ti->len);
}
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	ms->mirror[mirror].offset = offset;

	return 0;
}
/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
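/*
 * For illustration only (device names and sizes hypothetical), a
 * two-way mirror with a core log and 1024-sector regions could be
 * created with:
 *
 *   dmsetup create mirrored --table \
 *     "0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0"
 *
 * Appending "1 handle_errors" would enable the feature parsed by
 * parse_features() above.
 */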
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wake(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}
/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}
static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu 0 ",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};
static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		dm_dirty_log_exit();
	}

	return r;
}
static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);

	dm_dirty_log_exit();
}
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");