/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
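/*
 * Worked example (assuming a 64-byte cache line and an 8-byte sector_t):
 * NODE_SIZE = 64, KEYS_PER_NODE = 8 and CHILDREN_PER_NODE = 9, so each
 * btree node fills exactly one cache line.
 */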
/*
 * The table has always exactly one reference from either mapped_device->map
 * or hash_cell->new_map. This reference is not counted in table->holders.
 * A pair of dm_create_table/dm_destroy_table functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders. A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};
/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
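/*
 * For example: min_not_zero(0, 1024) == 1024, min_not_zero(512, 0) == 512
 * and min_not_zero(512, 1024) == 512; a zero operand means "no limit set"
 * and must not win over a real limit.  Note the macro evaluates its
 * arguments more than once, so avoid side effects in l and r.
 */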
/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->max_hw_sectors =
		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}
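/*
 * Note the asymmetry above: throughput-style limits (max_sectors, segment
 * counts, ...) take the smallest non-zero value, because the most
 * restrictive underlying device wins, while hardsect_size takes the
 * maximum, because every I/O must be aligned to the largest hardware
 * sector size of any underlying device.
 */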
/*
 * Calculate the index of the child node of the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
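/*
 * Each internal node thus stores, for each of its children, the highest
 * sector that child's subtree can resolve; a lookup only has to find the
 * first key >= the wanted sector at each level.
 */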
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
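/*
 * The overflow test above relies on the fact that, for non-zero
 * elem_size, nmemb * elem_size wraps an unsigned long exactly when
 * nmemb > ULONG_MAX / elem_size, so the check must come before the
 * multiplication.
 */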
/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
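/*
 * The single dm_vcalloc() above covers both arrays: the highs keys sit at
 * the front and the dm_target structs directly after them, so both grow
 * and are freed together (only t->highs is ever passed to vfree()).
 * Unused highs slots are filled with (sector_t) -1, the same sentinel
 * high() returns for out-of-range nodes.
 */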
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 0);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
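/*
 * Rounding the initial allocation to a multiple of KEYS_PER_NODE keeps
 * the target array aligned with whole btree leaves; e.g. a request for
 * 10 targets is given room for 16 when KEYS_PER_NODE is 8 (the value on
 * a typical 64-byte cache line machine).
 */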
static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		kfree(dd);
	}
}
void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	while (atomic_read(&t->holders))
		msleep(1);
	smp_mb();

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}
void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	smp_mb__before_atomic_dec();
	atomic_dec(&t->holders);
}
/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}
/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}
/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->dm_dev.bdev);

	bdev = open_by_devnum(dev, d->dm_dev.mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev, d->dm_dev.mode);
	else
		d->dm_dev.bdev = bdev;
	return r;
}
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
	d->dm_dev.bdev = NULL;
}
/*
 * If possible, this checks that an area of the destination device is valid.
 */
static int check_device_area(struct dm_dev_internal *dd, sector_t start,
			     sector_t len)
{
	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device, and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev_internal dd_new, dd_old;

	dd_new = dd_old = *dd;

	dd_new.dm_dev.mode |= new_mode;
	dd_new.dm_dev.bdev = NULL;

	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
	if (r)
		return r;

	dd->dm_dev.mode |= new_mode;
	close_dev(&dd_old, md);

	return 0;
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      fmode_t mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dm_dev.mode = mode;
		dd->dm_dev.bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->dm_dev.name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, &dd->dm_dev);
		return -EINVAL;
	}

	*result = &dd->dm_dev;

	return 0;
}
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return;
	}

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */
	if (q->merge_bvec_fn && !ti->type->merge)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, fmode_t mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}
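/*
 * Typical use from a target constructor (a sketch only; "ls" and its
 * "dev" field are hypothetical):
 *
 *	if (dm_get_device(ti, argv[0], 0, ti->len,
 *			  dm_table_get_mode(ti->table), &ls->dev)) {
 *		ti->error = "device lookup failed";
 *		return -EINVAL;
 *	}
 */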
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
						  dm_dev);

	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}
/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
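/*
 * Example: the input "linear /dev/sdb 0" splits into argc == 3 with
 * argv = { "linear", "/dev/sdb", "0" }; a backslash quotes the next
 * character, so "a\ b" becomes the single argument "a b".  The input
 * buffer itself is modified (tokens are NUL-terminated in place).
 */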
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (!rs->bounce_pfn)
		rs->bounce_pfn = -1;
}
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
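/*
 * Depth example (KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9): 100 targets
 * need dm_div_up(100, 8) == 13 leaf nodes, and int_log(13, 9) == 2, giving
 * a tree of depth 3.
 */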
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}
void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}
/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
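/*
 * At each level the first key >= sector selects the child to descend
 * into.  If sector lies past the end of the device, the search lands on
 * one of the (sector_t) -1 sentinel slots set up by alloc_targets() and
 * returns a pointer to an unused target entry, which the caller's
 * dm_target_is_valid() check rejects.
 */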
/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *prev = NULL, *dd = NULL;

	if (!blk_get_integrity(dm_disk(t->md)))
		return;

	list_for_each_entry(dd, devices, list) {
		if (prev &&
		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
					  dd->dm_dev.bdev->bd_disk) < 0) {
			DMWARN("%s: integrity not set: %s and %s mismatch",
			       dm_device_name(t->md),
			       prev->dm_dev.bdev->bd_disk->disk_name,
			       dd->dm_dev.bdev->bd_disk->disk_name);
			goto no_integrity;
		}
		prev = dd;
	}

	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
		goto no_integrity;

	blk_integrity_register(dm_disk(t->md),
			       bdev_get_integrity(prev->dm_dev.bdev));

	return;

no_integrity:
	blk_integrity_register(dm_disk(t->md), NULL);
	return;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->max_hw_sectors = t->limits.max_hw_sectors;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;

	if (t->limits.no_cluster)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);

	dm_table_set_integrity(t);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}
struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}
fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}
void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	return r;
}
void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			blk_unplug(q);
		else
			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}
}
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);