/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
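/*
 * Worked example (illustrative, assuming a 64-byte L1 cache line and an
 * 8-byte sector_t, which is typical but configuration dependent):
 *
 *	NODE_SIZE         = 64 bytes
 *	KEYS_PER_NODE     = 64 / 8 = 8 keys per btree node
 *	CHILDREN_PER_NODE = 8 + 1 = 9 children per internal node
 *
 * so each btree node fits in a single cache line.
 */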
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};
/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
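/*
 * Illustrative values, with base == 9 (CHILDREN_PER_NODE when
 * KEYS_PER_NODE is 8):
 *
 *	int_log(1, 9)  == 0
 *	int_log(9, 9)  == 1
 *	int_log(19, 9) == 2	(19 -> 3 -> 1, two dm_div_up() steps)
 *
 * i.e. the number of dm_div_up() steps needed to reduce n to 1.
 */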
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
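/*
 * A zero limit means "not set", so it must never win over a real limit:
 * e.g. min_not_zero(0, 8) == 8, min_not_zero(4, 0) == 4,
 * min_not_zero(4, 8) == 4 and min_not_zero(0, 0) == 0.
 */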
/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->max_hw_sectors =
		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}
/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
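/*
 * high() follows the last child pointer at every level below l, so it
 * returns the highest key reachable under node n, or (sector_t) -1 (which
 * compares as "infinitely high" for unsigned sector_t) if that subtree
 * runs past the end of its level.
 */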
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
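/*
 * After this pass, key k of internal node n on level l caches the highest
 * sector covered by its k'th child on level l + 1, so a lookup can choose
 * the right child with one linear scan of a single cache-line-sized node.
 */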
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
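/*
 * The overflow check above matters most on 32-bit: for example (illustrative
 * values) nmemb == 0x200000 and elem_size == 0x1000 would wrap a 32-bit
 * multiplication to 0, and vmalloc() would then "succeed" with an allocation
 * far smaller than the caller asked for.
 */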
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
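/*
 * Layout of the single dm_vcalloc'd block (illustrative):
 *
 *	[ n_highs: num sector_t slots ][ n_targets: num struct dm_target slots ]
 *
 * n_targets simply points just past the highs array, and the "num + 1"
 * in the allocation leaves the extra empty entry mentioned above for
 * sectors beyond the end of the device.
 */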
int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		kfree(dd);
	}
}
static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}
void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}
/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}
/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.path.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_put(&nd.path);
	return r;
}
/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}
/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}
/*
 * If possible, this checks an area of a destination device is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;

	return ((start < dev_size) && (len <= (dev_size - start)));
}
/*
 * This upgrades the mode on an already open dm_dev.  Being
 * careful to leave things as they were if we fail to reopen the
 * device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}
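/*
 * The path can name the device either way: e.g. (illustrative values)
 * "8:16" is parsed directly as major:minor above, while "/dev/sdb" falls
 * through to lookup_device() to resolve the dev_t via the filesystem.
 */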
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */

	if (q->merge_bvec_fn && !ti->type->merge)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}
/*
 * Decrement a devices use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}
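/*
 * E.g. if the previous target covers sectors [0, 1000) (begin 0, len 1000),
 * the next target must begin at sector 1000 exactly; an empty table only
 * adjoins a target that begins at sector 0.
 */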
/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;

		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
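/*
 * Illustrative example: the input "  /dev/hda 0 rw\ option" is split into
 * argc == 3 with argv == { "/dev/hda", "0", "rw option" }; backslashes
 * quote the next character and are stripped, and every argv entry points
 * back into the (modified) input buffer, so the caller must keep it alive.
 */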
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
}
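/*
 * For instance, a freshly zeroed io_restrictions ends up with
 * max_sectors == SAFE_MAX_SECTORS and hardsect_size == 512 bytes
 * (1 << SECTOR_SHIFT), so even a table whose targets set no limits
 * still hands the queue a usable set of defaults.
 */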
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
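/*
 * Illustrative example: a table line such as "0 409600 linear /dev/hda 0"
 * arrives here as type "linear", start 0, len 409600 (sectors), and
 * params "/dev/hda 0", which dm_split_args() above turns into the argv
 * passed to the target's ctr().
 */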
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
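/*
 * Worked example (assuming KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9):
 * a table with 150 targets needs dm_div_up(150, 8) == 19 leaf nodes, so
 * depth == 1 + int_log(19, 9) == 3, and setup_indexes() builds the two
 * internal levels above the leaf level that lives in t->highs.
 */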
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}
void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}
/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
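/*
 * Example walk (illustrative), depth == 2: level 0 scans the root node for
 * the first key >= sector, giving k; level 1 then scans leaf get_child(0, k),
 * and the final (n, k) pair indexes straight into t->targets because the
 * leaf level aliases the highs/targets arrays.
 */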
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->max_hw_sectors = t->limits.max_hw_sectors;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;

	if (t->limits.no_cluster)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}
struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}
void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->bdev);

		blk_unplug(q);
	}
}
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);