/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
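/*
 * Illustrative note (editorial addition, not in the original source): on a
 * machine with a 64-byte L1 cache line and an 8-byte sector_t, NODE_SIZE
 * is 64, KEYS_PER_NODE is 64 / 8 = 8 and CHILDREN_PER_NODE is 9, so each
 * btree node fills exactly one cache line and fans out nine ways.  The
 * actual values depend on the architecture's cache line size and on
 * whether sector_t is 4 or 8 bytes in the running configuration.
 */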
/*
 * The table has always exactly one reference from either mapped_device->map
 * or hash_cell->new_map. This reference is not counted in table->holders.
 * A pair of dm_create_table/dm_destroy_table functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders. A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */
struct dm_table {
        struct mapped_device *md;
        atomic_t holders;

        /* btree table */
        unsigned int depth;
        unsigned int counts[MAX_DEPTH]; /* in nodes */
        sector_t *index[MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        /*
         * Indicates the rw permissions for the new logical
         * device. This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        fmode_t mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;
};
/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}
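/*
 * Worked example (editorial addition): with base = CHILDREN_PER_NODE = 9
 * (see the note near the top of the file), int_log(125, 9) iterates
 * 125 -> dm_div_up(125, 9) = 14 -> dm_div_up(14, 9) = 2 ->
 * dm_div_up(2, 9) = 1 and returns 3, i.e. ceil(log_9(125)).  This is how
 * dm_table_complete() later turns a leaf-node count into a btree depth.
 */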
/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}
/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
        unsigned long size;
        void *addr;

        /*
         * Check that we're not going to overflow.
         */
        if (nmemb > (ULONG_MAX / elem_size))
                return NULL;

        size = nmemb * elem_size;
        addr = vmalloc(size);
        if (addr)
                memset(addr, 0, size);

        return addr;
}
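/*
 * Usage sketch (editorial addition): a caller sizing an array of sector
 * keys might write, for a hypothetical count "nr":
 *
 *	sector_t *highs = dm_vcalloc(nr + 1, sizeof(sector_t));
 *
 *	if (!highs)
 *		return -ENOMEM;
 *
 * The ULONG_MAX / elem_size test above rejects requests whose
 * nmemb * elem_size product would overflow an unsigned long before the
 * multiplication is ever performed.
 */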
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;
        int n = t->num_targets;

        /*
         * Allocate both the target array and offset array at once.
         * Append an empty entry to catch sectors beyond the end of
         * the device.
         */
        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        if (n) {
                memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
                memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
        }

        memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
        vfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);
        atomic_set(&t->holders, 0);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                return -ENOMEM;
        }

        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}
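/*
 * Usage sketch (editorial addition, not part of the original source): the
 * ioctl layer creates a table along these lines, where the "param" names
 * are illustrative rather than quoted from dm-ioctl.c:
 *
 *	struct dm_table *t;
 *	int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE,
 *				param->target_count, md);
 *	if (r)
 *		return r;
 *
 * num_targets is only a sizing hint: it is rounded up to a multiple of
 * KEYS_PER_NODE above and the arrays can still grow later through
 * check_space().
 */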
static void free_devices(struct list_head *devices)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("dm_table_destroy: dm_put_device call missing for %s",
                       dd->dm_dev.name);
                kfree(dd);
        }
}
void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        while (atomic_read(&t->holders))
                msleep(1);
        smp_mb();

        /* free the indexes (see dm_table_complete) */
        if (t->depth >= 2)
                vfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        vfree(t->highs);

        /* free the device list */
        if (t->devices.next != &t->devices)
                free_devices(&t->devices);

        kfree(t);
}
void dm_table_get(struct dm_table *t)
{
        atomic_inc(&t->holders);
}
void dm_table_put(struct dm_table *t)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&t->holders);
}
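/*
 * Usage sketch (editorial addition): code that borrows the live table,
 * e.g. an I/O mapping path, brackets its use with a get/put pair so that
 * dm_table_destroy() above waits for it to finish.  Assuming the dm.c
 * helper that returns the current map with holders already incremented
 * (named dm_get_table() in this kernel, cited from memory):
 *
 *	struct dm_table *map = dm_get_table(md);
 *
 *	if (map) {
 *		... look up a target and remap the bio ...
 *		dm_table_put(map);
 *	}
 */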
/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
        if (t->num_targets >= t->num_allocated)
                return alloc_targets(t, t->num_allocated * 2);

        return 0;
}
/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry (dd, l, list)
                if (dd->dm_dev.bdev->bd_dev == dev)
                        return dd;

        return NULL;
}
/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
                    struct mapped_device *md)
{
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;

        int r;

        BUG_ON(d->dm_dev.bdev);

        bdev = open_by_devnum(dev, d->dm_dev.mode);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
        if (r)
                blkdev_put(bdev, d->dm_dev.mode);
        else
                d->dm_dev.bdev = bdev;
        return r;
}
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
        bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
        blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
        d->dm_dev.bdev = NULL;
}
/*
 * If possible, this checks an area of a destination device is valid.
 */
static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size =
            i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
        unsigned short logical_block_size_sectors =
            limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        if ((start >= dev_size) || (start + ti->len > dev_size)) {
                DMWARN("%s: %s too small for target",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (logical_block_size_sectors <= 1)
                return 1;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %hu of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 0;
        }

        if (ti->len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %hu of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)ti->len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 0;
        }

        return 1;
}
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev_internal dd_new, dd_old;

        dd_new = dd_old = *dd;

        dd_new.dm_dev.mode |= new_mode;
        dd_new.dm_dev.bdev = NULL;

        r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
        if (r)
                return r;

        dd->dm_dev.mode |= new_mode;
        close_dev(&dd_old, md);

        return 0;
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                              const char *path, sector_t start, sector_t len,
                              fmode_t mode, struct dm_dev **result)
{
        int r;
        dev_t uninitialized_var(dev);
        struct dm_dev_internal *dd;
        unsigned int major, minor;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u", &major, &minor) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                /* convert the path to a device */
                struct block_device *bdev = lookup_bdev(path);

                if (IS_ERR(bdev))
                        return PTR_ERR(bdev);
                dev = bdev->bd_dev;
                bdput(bdev);
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                dd->dm_dev.mode = mode;
                dd->dm_dev.bdev = NULL;

                if ((r = open_dev(dd, dev, t->md))) {
                        kfree(dd);
                        return r;
                }

                format_dev_t(dd->dm_dev.name, dev);

                atomic_set(&dd->count, 0);
                list_add(&dd->list, &t->devices);

        } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        atomic_inc(&dd->count);

        *result = &dd->dm_dev;
        return 0;
}
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
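/*
 * Worked example (editorial addition): min_not_zero(0, 8) evaluates to 8
 * and min_not_zero(4, 8) to 4, so a zero ("no limit") value never hides a
 * real restriction when two limits are combined.  A typical caller wraps
 * the macro in an assignment, as dm_set_device_limits() does below:
 *
 *	limits->max_sectors = min_not_zero(limits->max_sectors,
 *					   (unsigned int) (PAGE_SIZE >> 9));
 */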
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                         sector_t start, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (blk_stack_limits(limits, &q->limits, start) < 0)
                DMWARN("%s: target device %s is misaligned",
                       dm_device_name(ti->table->md), bdevname(bdev, b));

        /*
         * Check if merge fn is supported.
         * If not we'll force DM to use PAGE_SIZE or
         * smaller I/O, just to be safe.
         */

        if (q->merge_bvec_fn && !ti->type->merge)
                limits->max_sectors =
                        min_not_zero(limits->max_sectors,
                                     (unsigned int) (PAGE_SIZE >> 9));
        return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
                  sector_t len, fmode_t mode, struct dm_dev **result)
{
        return __table_get_device(ti->table, ti, path,
                                  start, len, mode, result);
}
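/*
 * Usage sketch (editorial addition): a target constructor typically
 * resolves its backing device with this helper, roughly as the linear
 * target does (variable names here are illustrative):
 *
 *	struct dm_dev *dev;
 *
 *	if (dm_get_device(ti, argv[0], 0, ti->len,
 *			  dm_table_get_mode(ti->table), &dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * and releases it with dm_put_device(ti, dev) from its destructor.
 */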
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
                                                  dm_dev);

        if (atomic_dec_and_test(&dd->count)) {
                close_dev(dd, ti->table->md);
                list_del(&dd->list);
                kfree(dd);
        }
}
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}
/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
        char **argv;
        unsigned new_size;

        new_size = *array_size ? *array_size * 2 : 64;
        argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
        if (argv) {
                memcpy(argv, old_argv, *array_size * sizeof(*argv));
                *array_size = new_size;
        }

        kfree(old_argv);
        return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                start = end;

                /* Skip whitespace */
                while (*start && isspace(*start))
                        start++;

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any back-quotes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}
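/*
 * Worked example (editorial addition): splitting the writable string
 *
 *	"0 409600 linear /dev/hda1\ backup 8192"
 *
 * produces argc = 5 and argv = { "0", "409600", "linear",
 * "/dev/hda1 backup", "8192" }: the backslash quotes the following space
 * so the token keeps it, and the backslash itself is dropped.
 */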
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                      struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        /*
         * Check each entry in the table in turn.
         */
        while (i < dm_table_get_num_targets(table)) {
                ti = dm_table_get_target(table, i++);

                blk_set_default_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %hu",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}
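/*
 * Worked example (editorial addition): with a 4096-byte logical block
 * size, device_logical_block_size_sects is 8.  If the first target is
 * 12 sectors long, next_target_start becomes 12 & 7 = 4 and remaining
 * becomes 8 - 4 = 4, so the second target must be able to take a
 * 4-sector piece of a split bio; if its own logical_block_size is also
 * 4096 bytes (8 sectors), 4 & 7 is non-zero and the table is rejected.
 */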
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if ((r = check_space(t)))
                return r;

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md),
                      type);
                return -EINVAL;
        }

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                r = -EINVAL;
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
        return 0;

 bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}
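/*
 * Worked example (editorial addition): a table line such as
 *
 *	0 409600 linear /dev/sdb 0
 *
 * arrives here as type = "linear", start = 0, len = 409600 and
 * params = "/dev/sdb 0"; dm_split_args() then hands the target's
 * constructor argc = 2 and argv = { "/dev/sdb", "0" }.
 */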
static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}
/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}
void dm_table_event(struct dm_table *t)
{
        /*
         * You can no longer call dm_table_event() from interrupt
         * context, use a bottom half instead.
         */
        BUG_ON(in_interrupt());

        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}
sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}
/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}
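/*
 * Worked example (editorial addition): with KEYS_PER_NODE = 8 and
 * depth = 2, a lookup scans at most the 8 keys of the root node to pick a
 * child, descends via get_child(), and scans that leaf's 8 keys; the
 * first key >= sector selects the target.  A lookup therefore examines
 * depth * KEYS_PER_NODE keys at most instead of every entry in t->highs.
 */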
/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
                              struct queue_limits *limits)
{
        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        blk_set_default_limits(limits);

        while (i < dm_table_get_num_targets(table)) {
                blk_set_default_limits(&ti_limits);

                ti = dm_table_get_target(table, i++);

                if (!ti->type->iterate_devices)
                        goto combine_limits;

                /*
                 * Combine queue limits of all the devices this target uses.
                 */
                ti->type->iterate_devices(ti, dm_set_device_limits,
                                          &ti_limits);

                /*
                 * Check each device area is consistent with the target's
                 * overall queue limits.
                 */
                if (!ti->type->iterate_devices(ti, device_area_is_valid,
                                               &ti_limits))
                        return -EINVAL;

combine_limits:
                /*
                 * Merge this target's queue limits into the overall limits
                 * for the table.
                 */
                if (blk_stack_limits(limits, &ti_limits, 0) < 0)
                        DMWARN("%s: target device "
                               "(start sect %llu len %llu) "
                               "is misaligned",
                               dm_device_name(table->md),
                               (unsigned long long) ti->begin,
                               (unsigned long long) ti->len);
        }

        return validate_hardware_logical_block_alignment(table, limits);
}
/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *prev = NULL, *dd = NULL;

        if (!blk_get_integrity(dm_disk(t->md)))
                return;

        list_for_each_entry(dd, devices, list) {
                if (prev &&
                    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
                                          dd->dm_dev.bdev->bd_disk) < 0) {
                        DMWARN("%s: integrity not set: %s and %s mismatch",
                               dm_device_name(t->md),
                               prev->dm_dev.bdev->bd_disk->disk_name,
                               dd->dm_dev.bdev->bd_disk->disk_name);
                        goto no_integrity;
                }
                prev = dd;
        }

        if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
                goto no_integrity;

        blk_integrity_register(dm_disk(t->md),
                               bdev_get_integrity(prev->dm_dev.bdev));

        return;

no_integrity:
        blk_integrity_register(dm_disk(t->md), NULL);
        return;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
{
        /*
         * Each target device in the table has a data area that should normally
         * be aligned such that the DM device's alignment_offset is 0.
         * FIXME: Propagate alignment_offsets up the stack and warn of
         *        sub-optimal or inconsistent settings.
         */
        limits->alignment_offset = 0;
        limits->misaligned = 0;

        /*
         * Copy table's limits to the DM device's request_queue
         */
        q->limits = *limits;

        if (limits->no_cluster)
                queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);

        dm_table_set_integrity(t);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
        return t->num_targets;
}
struct list_head *dm_table_get_devices(struct dm_table *t)
{
        return &t->devices;
}
fmode_t dm_table_get_mode(struct dm_table *t)
{
        return t->mode;
}
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
        int i = t->num_targets;
        struct dm_target *ti = t->targets;

        while (i--) {
                if (postsuspend) {
                        if (ti->type->postsuspend)
                                ti->type->postsuspend(ti);
                } else if (ti->type->presuspend)
                        ti->type->presuspend(ti);

                ti++;
        }
}
void dm_table_presuspend_targets(struct dm_table *t)
{
        suspend_targets(t, 0);
}
void dm_table_postsuspend_targets(struct dm_table *t)
{
        suspend_targets(t, 1);
}
int dm_table_resume_targets(struct dm_table *t)
{
        int i, r;

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (!ti->type->preresume)
                        continue;

                r = ti->type->preresume(ti);
                if (r)
                        return r;
        }

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (ti->type->resume)
                        ti->type->resume(ti);
        }

        return 0;
}
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);
        int r = 0;

        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
                char b[BDEVNAME_SIZE];

                if (likely(q))
                        r |= bdi_congested(&q->backing_dev_info, bdi_bits);
                else
                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
                                     dm_device_name(t->md),
                                     bdevname(dd->dm_dev.bdev, b));
        }

        return r;
}
void dm_table_unplug_all(struct dm_table *t)
{
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);

        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
                char b[BDEVNAME_SIZE];

                if (likely(q))
                        blk_unplug(q);
                else
                        DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
                                     dm_device_name(t->md),
                                     bdevname(dd->dm_dev.bdev, b));
        }
}
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
        return t->md;
}
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);