/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
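/*
 * Illustrative geometry (the values vary by architecture): with a
 * 64-byte L1 cache line and an 8-byte sector_t, KEYS_PER_NODE is 8
 * and CHILDREN_PER_NODE is 9, so each btree node fills exactly one
 * cache line.
 */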
struct dm_table {
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};
/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned long n, unsigned long base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
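/*
 * Illustration: int_log(1, 9) == 0, int_log(9, 9) == 1 and
 * int_log(10, 9) == 2, i.e. the number of times n must be divided
 * (rounding up) by base before reaching 1.
 */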
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
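/*
 * Example: min_not_zero(0, 8) == 8 and min_not_zero(4, 8) == 4.
 * A zero limit means "no restriction", so it must never win the min.
 */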
/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
}
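/*
 * Example: combining a max_sectors of 0 (unset) with 128 gives 128,
 * while a hardsect_size of 512 versus 4096 gives 4096: transfer
 * limits shrink to the most restrictive value, but the hardware
 * sector granularity grows to the largest one.
 */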
/*
 * Calculate the index of the child node for the k'th key of the
 * n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
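/*
 * Example: with CHILDREN_PER_NODE == 9, node 0's children are
 * nodes 0-8 on the level below, node 1's are nodes 9-17, and so
 * on, the usual array layout for a complete n-ary tree.
 */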
/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
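/*
 * In other words, high() walks down the rightmost descendants of
 * the node until it reaches the leaf level; if that leaf falls off
 * the end of the level, (sector_t) - 1 acts as a "maximum key"
 * sentinel.
 */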
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
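/*
 * Overflow example: on a 32-bit machine dm_vcalloc(0x10000, 0x10000)
 * would wrap the multiplication to 0 bytes; the ULONG_MAX check
 * above catches this and returns NULL instead.
 */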
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
{
	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	*result = t;
	return 0;
}
static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}
void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}
void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}
void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}
/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}
/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}
/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}
/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	if (d->bdev)
		BUG();

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim(bdev, _claim_ptr);
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;

	return r;
}
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d)
{
	if (!d->bdev)
		return;

	bd_release(d->bdev);
	blkdev_put(d->bdev);
	d->bdev = NULL;
}
/*
 * If possible (ie. blk_size[major] is set), this checks an area
 * of a destination device is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size;

	dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
	return ((start < dev_size) && (len <= (dev_size - start)));
}
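/*
 * Example: a 1 GiB device is (1 << 30) >> SECTOR_SHIFT == 2097152
 * sectors, so any area with start >= 2097152 or
 * len > 2097152 - start is rejected.
 */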
/*
 * This upgrades the mode on an already open dm_dev.  Being
 * careful to leave things as they were if we fail to reopen the
 * device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev);
	if (!r)
		close_dev(&dd_copy);
	else
		*dd = dd_copy;

	return r;
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev))) {
			kfree(dd);
			return r;
		}

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);
	if (!r) {
		request_queue_t *q = bdev_get_queue((*result)->bdev);
		struct io_restrictions *rs = &ti->limits;

		/*
		 * Combine the device limits low.
		 *
		 * FIXME: if we move an io_restriction struct
		 *        into q this would just be a call to
		 *        combine_restrictions_low()
		 */
		rs->max_sectors =
			min_not_zero(rs->max_sectors, q->max_sectors);

		/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
		 *        currently doesn't honor MD's merge_bvec_fn routine.
		 *        In this case, we'll force DM to use PAGE_SIZE or
		 *        smaller I/O, just to be safe. A better fix is in the
		 *        works, but add this for the time being so it will at
		 *        least operate correctly.
		 */
		if (q->merge_bvec_fn)
			rs->max_sectors =
				min_not_zero(rs->max_sectors,
					     (unsigned short)(PAGE_SIZE >> 9));

		rs->max_phys_segments =
			min_not_zero(rs->max_phys_segments,
				     q->max_phys_segments);

		rs->max_hw_segments =
			min_not_zero(rs->max_hw_segments, q->max_hw_segments);

		rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

		rs->max_segment_size =
			min_not_zero(rs->max_segment_size, q->max_segment_size);

		rs->seg_boundary_mask =
			min_not_zero(rs->seg_boundary_mask,
				     q->seg_boundary_mask);
	}

	return r;
}
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd);
		list_del(&dd->list);
		kfree(dd);
	}
}
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}
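/*
 * Example: if the previous target maps sectors [0, 100), a new
 * target must begin at sector 100 for the table to be gapless.
 */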
/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);

	return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 */
static int split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;
	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
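/*
 * Illustration: the writable input "a b\ c" is split into
 * argc == 2 with argv[0] == "a" and argv[1] == "b c"; the
 * backslash escape is unquoted in place.
 */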
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
}
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		tgt->error = "zero-length target";
		DMERR(": %s\n", tgt->error);
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		tgt->error = "unknown target type";
		DMERR(": %s\n", tgt->error);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR(": %s\n", tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
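/*
 * Sizing example (assuming KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9):
 * a table of 1000 targets has leaf_nodes == 125 and
 * depth == 1 + int_log(125, 9) == 4.
 */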
static DECLARE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	down(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	up(&_event_lock);
}
void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	down(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	up(&_event_lock);
}
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}
/*
 * Search the btree for the correct target.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
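/*
 * The walk above descends one level per iteration: n selects the
 * child picked on the previous level, and k is the index of the
 * first key >= sector within that node; after the leaf level,
 * (KEYS_PER_NODE * n) + k indexes the matching target directly.
 */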
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}
struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}
int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
void dm_table_suspend_targets(struct dm_table *t)
{
	int i;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->suspend)
			ti->type->suspend(ti);
	}
}
void dm_table_resume_targets(struct dm_table *t)
{
	int i;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}
}
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}
void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
	}
}
int dm_table_flush_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);
	int ret = 0;

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		int err;

		if (!q->issue_flush_fn)
			err = -EOPNOTSUPP;
		else
			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);

		if (!ret)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
EXPORT_SYMBOL(dm_table_flush_all);