/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

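/*
 * Example geometry: assuming a 64-byte L1 cache line and an 8-byte
 * sector_t (both architecture-dependent), each btree node holds
 * KEYS_PER_NODE = 64 / 8 = 8 keys and fans out to
 * CHILDREN_PER_NODE = 9 children, so one node fills exactly one
 * cache line.
 */
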
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

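/*
 * Worked example: int_log(1000, 9) repeatedly rounds the division up,
 * 1000 -> 112 -> 13 -> 2 -> 1, giving 4 == ceil(log9(1000)).
 */
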
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

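/*
 * e.g. min_not_zero(0, 8) == 8, min_not_zero(4, 8) == 4 and
 * min_not_zero(0, 0) == 0; a zero means "no restriction", so it only
 * wins when neither side imposes a limit.
 */
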
/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

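/*
 * Index arithmetic, assuming KEYS_PER_NODE == 8: node n on one level
 * has children 9n .. 9n+8 on the level below, and get_node(t, l, n) is
 * just t->index[l] + 8n, i.e. the n'th block of 8 keys in level l's
 * flat array.
 */
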
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

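/*
 * Layout of the single allocation above (a sketch, not normative):
 *
 *	n_highs                          n_targets
 *	|<------ num sector_t ------->|<----- dm_target slots ----->|
 *
 * The memset of -1 leaves every unused high as (sector_t) -1, so a
 * lookup beyond the last real target lands on a sentinel entry
 * rather than stale data.
 */
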
int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
{
	struct dm_table *t;
	sector_t dev_size = 1;
	int r;

	/*
	 * Find current size of device.
	 * Default to 1 sector if inactive.
	 */
	t = dm_get_table(md);
	if (t) {
		dev_size = dm_table_get_size(t);
		dm_table_put(t);
	}

	r = dm_table_create(&t, FMODE_READ, 1, md);
	if (r)
		return r;

	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
	if (r)
		goto out;

	r = dm_table_complete(t);
	if (r)
		goto out;

	*result = t;

out:
	if (r)
		dm_table_put(t);

	return r;
}
EXPORT_SYMBOL_GPL(dm_create_error_table);

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

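/*
 * Doubling the allocation keeps the amortised cost of growing the
 * arrays across repeated dm_table_add_target() calls linear in the
 * number of targets.
 */
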
/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible, this checks an area of a destination device is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev.  Being
 * careful to leave things as they were if we fail to reopen the
 * device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
	 *        currently doesn't honor MD's merge_bvec_fn routine.
	 *        In this case, we'll force DM to use PAGE_SIZE or
	 *        smaller I/O, just to be safe. A better fix is in the
	 *        works, but add this for the time being so it will at
	 *        least operate correctly.
	 */
	if (q->merge_bvec_fn)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);
	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

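/*
 * Typical (hypothetical) usage from a target constructor, where
 * argv[0] names the device either as a path or as "major:minor":
 *
 *	struct dm_dev *dev;
 *	if (dm_get_device(ti, argv[0], 0, ti->len,
 *			  dm_table_get_mode(ti->table), &dev)) {
 *		ti->error = "device lookup failed";
 *		return -EINVAL;
 *	}
 */
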
/*
 * Decrement a devices use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);

	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

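/*
 * Example (illustrative, not from the original source): the input
 * "linear 253:0\ extra  8192" splits into the three args "linear",
 * "253:0 extra" and "8192"; the backslash quoting is stripped in
 * place via 'out'.
 */
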
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

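/*
 * Shape example, assuming KEYS_PER_NODE == 8: a table with 100
 * targets has leaf_nodes = dm_div_up(100, 8) = 13, so
 * depth = 1 + int_log(13, 9) = 3 and counts[] = { 1, 2, 13 } from
 * root to leaves.
 */
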
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

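/*
 * Walk-through: at each level the first key >= sector selects child k;
 * after the final (leaf) level, n is the leaf node and k the slot
 * within it, so target (KEYS_PER_NODE * n) + k is the first one whose
 * high boundary is at or beyond the requested sector.
 */
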
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);

		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
	}
}

int dm_table_flush_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);
	int ret = 0;
	unsigned i;

	for (i = 0; i < t->num_targets; i++)
		if (t->targets[i].type->flush)
			t->targets[i].type->flush(&t->targets[i]);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);
		int err;

		if (!q->issue_flush_fn)
			err = -EOPNOTSUPP;
		else
			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);

		if (!ret)
			ret = err;
	}

	return ret;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_table_get(t);

	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
EXPORT_SYMBOL(dm_table_flush_all);