/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 5
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
/*
 * The table has always exactly one reference from either mapped_device->map
 * or hash_cell->new_map. This reference is not counted in table->holders.
 * A pair of dm_create_table/dm_destroy_table functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders. A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};
/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
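/*
 * For example, with base CHILDREN_PER_NODE this gives the number of
 * internal btree levels needed above a given number of leaf nodes:
 * int_log(1, 9) == 0, int_log(9, 9) == 1 and int_log(10, 9) == 2
 * (assuming CHILDREN_PER_NODE works out to 9, i.e. KEYS_PER_NODE == 8).
 */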
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
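/*
 * A zero limit means "unset", so min_not_zero(0, 8) == 8,
 * min_not_zero(4, 8) == 4 and min_not_zero(0, 0) == 0; an unrestricted
 * field simply inherits the other side's restriction.
 */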
/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->logical_block_size = max(lhs->logical_block_size,
				      rhs->logical_block_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->max_hw_sectors =
		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}
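/*
 * Each field above is a ceiling, so the combined table keeps the
 * smaller non-zero value; logical_block_size is the exception because
 * the stacked device must honour the largest block size any underlying
 * device requires, hence max() rather than min_not_zero().
 */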
/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
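/*
 * (sector_t) - 1 is the "no such node" sentinel: it is also the value
 * alloc_targets() memsets into unused high entries, so lookups that walk
 * past the populated part of a level always compare greater than any
 * real sector.
 */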
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
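/*
 * The leaf level of the index is simply t->highs (one key per target);
 * each key of an internal node caches the highest sector reachable
 * through the corresponding child, so a lookup scans at most
 * KEYS_PER_NODE keys per level on the way down.
 */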
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
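/*
 * The nmemb > ULONG_MAX / elem_size test rejects requests whose
 * nmemb * elem_size product would wrap around before vmalloc() ever
 * sees it, instead of silently allocating a much smaller buffer.
 */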
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
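/*
 * The single allocation above holds the highs array immediately
 * followed by the dm_target array; unused high slots are filled with
 * -1 (i.e. (sector_t) - 1) so that sectors beyond the populated targets
 * never match a real entry during btree lookup.
 */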
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 0);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
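/*
 * num_targets is rounded up so that the leaf level of the btree is made
 * of whole nodes; for instance a request for 10 targets becomes an
 * allocation of 16 when KEYS_PER_NODE works out to 8 (64-byte cache
 * lines and an 8-byte sector_t).
 */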
static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
		       dd->dm_dev.name);
		kfree(dd);
	}
}
void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	while (atomic_read(&t->holders))
		msleep(1);
	smp_mb();

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices)
		free_devices(&t->devices);

	kfree(t);
}
void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	smp_mb__before_atomic_dec();
	atomic_dec(&t->holders);
}
/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}
/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}
/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->dm_dev.bdev);

	bdev = open_by_devnum(dev, d->dm_dev.mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev, d->dm_dev.mode);
	else
		d->dm_dev.bdev = bdev;
	return r;
}
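/*
 * _claim_ptr is only used as an opaque cookie: bd_claim_by_disk()
 * records it as the holder of the block device, so competing claims
 * with a different holder are refused while the device belongs to this
 * table.
 */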
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
	d->dm_dev.bdev = NULL;
}
/*
 * If possible, this checks an area of a destination device is valid.
 */
static int device_area_is_valid(struct dm_target *ti, struct block_device *bdev,
				sector_t start, sector_t len)
{
	sector_t dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		ti->limits.logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	if (!dev_size)
		return 1;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (logical_block_size_sectors <= 1)
		return 1;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %hu of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       ti->limits.logical_block_size, bdevname(bdev, b));
		return 0;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %hu of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       ti->limits.logical_block_size, bdevname(bdev, b));
		return 0;
	}

	return 1;
}
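/*
 * Example: a device advertising 4096-byte logical blocks gives
 * logical_block_size_sectors == 8, so a target start or len that is not
 * a multiple of 8 sectors (4 KiB) is rejected with the warnings above.
 */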
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev_internal dd_new, dd_old;

	dd_new = dd_old = *dd;

	dd_new.dm_dev.mode |= new_mode;
	dd_new.dm_dev.bdev = NULL;

	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
	if (r)
		return r;

	dd->dm_dev.mode |= new_mode;
	close_dev(&dd_old, md);

	return 0;
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      fmode_t mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dm_dev.mode = mode;
		dd->dm_dev.bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->dm_dev.name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	*result = &dd->dm_dev;
	return 0;
}
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return;
	}

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, queue_max_sectors(q));

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */
	if (q->merge_bvec_fn && !ti->type->merge)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     queue_max_phys_segments(q));

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));

	rs->logical_block_size = max(rs->logical_block_size,
				     queue_logical_block_size(q));

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     queue_segment_boundary(q));

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, fmode_t mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (r)
		return r;

	dm_set_device_limits(ti, (*result)->bdev);

	if (!device_area_is_valid(ti, (*result)->bdev, start, len)) {
		dm_put_device(ti, *result);
		*result = NULL;
		return -EINVAL;
	}

	return r;
}
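/*
 * Typical use is from a target constructor, which passes a path or
 * "major:minor" string from its argument list, roughly:
 *
 *	r = dm_get_device(ti, argv[0], 0, ti->len,
 *			  dm_table_get_mode(ti->table), &dev);
 *
 * where &dev is the target's own struct dm_dev pointer (illustrative
 * sketch only; real targets also set ti->error on failure and choose
 * their own start/len).
 */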
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
						  dm_dev);

	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}
/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);

	return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
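/*
 * Example: the params string "0 /dev/sdb1 some\ arg" is rewritten in
 * place and split into argv = { "0", "/dev/sdb1", "some arg" } with
 * *argc == 3; the backslash escape above is how a single argument can
 * carry embedded whitespace.
 */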
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->logical_block_size)
		rs->logical_block_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
}
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
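/*
 * Worked example, assuming KEYS_PER_NODE == 8 (so CHILDREN_PER_NODE ==
 * 9): a table with 100 targets has leaf_nodes == 13 and depth == 3, and
 * setup_indexes() then builds counts[] = { 1, 2, 13 } above the highs
 * array.
 */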
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}
/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
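/*
 * At each level the loop descends into child k of the current node
 * (get_child(n, k)) and then finds the first cached high key that is
 * >= sector; after the leaf level, (KEYS_PER_NODE * n) + k is the
 * target index.  A sector beyond the last target lands on one of the
 * -1 padding entries, which is why callers must use
 * dm_target_is_valid() as noted above.
 */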
/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *prev = NULL, *dd = NULL;

	if (!blk_get_integrity(dm_disk(t->md)))
		return;

	list_for_each_entry(dd, devices, list) {
		if (prev &&
		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
					  dd->dm_dev.bdev->bd_disk) < 0) {
			DMWARN("%s: integrity not set: %s and %s mismatch",
			       dm_device_name(t->md),
			       prev->dm_dev.bdev->bd_disk->disk_name,
			       dd->dm_dev.bdev->bd_disk->disk_name);
			goto no_integrity;
		}
		prev = dd;
	}

	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
		goto no_integrity;

	blk_integrity_register(dm_disk(t->md),
			       bdev_get_integrity(prev->dm_dev.bdev));

	return;

no_integrity:
	blk_integrity_register(dm_disk(t->md), NULL);

	return;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
	blk_queue_logical_block_size(q, t->limits.logical_block_size);
	blk_queue_max_segment_size(q, t->limits.max_segment_size);
	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
	blk_queue_bounce_limit(q, t->limits.bounce_pfn);

	if (t->limits.no_cluster)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);

	dm_table_set_integrity(t);
}
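/*
 * This is the point where the limits gathered by dm_table_add_target()
 * reach the block layer: the mapped device's queue is configured from
 * the combined table limits when the table is bound to the device.
 */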
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	return r;
}
void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			blk_unplug(q);
		else
			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}
}
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);