/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
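/*
 * Illustrative sizing (an added note, not from the original source):
 * assuming a 64-byte L1 cache line and an 8-byte sector_t, each btree
 * node holds 64 / 8 = 8 keys and fans out to 9 children, so one node
 * fits exactly in a cache line.
 */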
struct dm_table {
	struct mapped_device *md;
	unsigned type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;
	unsigned integrity_supported:1;
	unsigned singleton:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};
/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
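/*
 * Worked example (an added note, not from the original source):
 * int_log(200, 9) iterates 200 -> 23 -> 3 -> 1 and returns 3, i.e.
 * ceil(log9(200)), the number of internal btree levels needed above
 * 200 leaf nodes with a fanout of 9.
 */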
/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);
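/*
 * Illustrative note (added, not from the original source): the overflow
 * check above works because nmemb * elem_size fits in an unsigned long
 * only when nmemb <= ULONG_MAX / elem_size. On a 32-bit kernel, for
 * example, dm_vcalloc(0x20000000, 8) would otherwise wrap to 0 and
 * vzalloc() a zero-sized buffer instead of failing cleanly.
 */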
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}
void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}
/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}
/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;
	struct dm_table *t = ti->table;
	char dummy;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
			kfree(dd);
			return r;
		}

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	*result = dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */
	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
		blk_limits_max_hw_sectors(limits,
					  (unsigned int) (PAGE_SIZE >> 9));
	return 0;
}
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (atomic_dec_and_test(&dd->count)) {
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}
/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*array_size) {
		new_size = *array_size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc(new_size * sizeof(*argv), gfp);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
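/*
 * Illustrative example (added, not from the original source): given the
 * writable input string "a b\ c d", dm_split_args() sets *argc to 3 and
 * *argvp to {"a", "b c", "d"}; the backslash quotes the space so "b c"
 * stays a single token.
 */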
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	/*
	 * Check each entry in the table in turn.
	 */
	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: target type %s must appear alone in table",
			      dm_device_name(t->md), type);
			return -EINVAL;
		}
		t->singleton = 1;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		DMERR("%s: target type %s may not be included in read-only tables",
		      dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), t->immutable_target_type->name);
			return -EINVAL;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), tgt->type->name);
			return -EINVAL;
		}
		t->immutable_target_type = tgt->type;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);
const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r = NULL;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
	}

	return r;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
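/*
 * Illustrative usage (added, not from the original source): a target
 * constructor might parse "2 feature_a feature_b" from its argument set
 * roughly like this, where _args, as and nr_features are hypothetical
 * local names:
 *
 *	static struct dm_arg _args[] = {
 *		{0, 8, "invalid number of features"},
 *	};
 *	unsigned nr_features;
 *
 *	r = dm_read_arg_group(_args, &as, &nr_features, &ti->error);
 *	while (!r && nr_features--)
 *		feature = dm_shift_arg(&as);
 *
 * dm_read_arg_group() additionally verifies that at least nr_features
 * arguments remain in the set before they are consumed.
 */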
static int dm_table_set_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices;
	unsigned live_md_type;

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		live_md_type = dm_get_md_type(t->md);
		if (live_md_type == DM_TYPE_REQUEST_BASED)
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/* Non-request-stackable devices can't be used for request-based dm */
	devices = dm_table_get_devices(t);
	list_for_each_entry(dd, devices, list) {
		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev->bdev))) {
			DMWARN("table load rejected: including non-request-stackable devices");
			return -EINVAL;
		}
	}

	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	t->type = DM_TYPE_REQUEST_BASED;

	return 0;
}
unsigned dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

bool dm_table_request_based(struct dm_table *t)
{
	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}
static int dm_table_alloc_md_mempools(struct dm_table *t)
{
	unsigned type = dm_table_get_type(t);
	unsigned per_bio_data_size = 0;
	struct dm_target *tgt;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	if (type == DM_TYPE_BIO_BASED)
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
		}

	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
/*
 * Get a disk whose integrity profile reflects the table's profile.
 * If %match_all is true, all devices' profiles must match.
 * If %match_all is false, all devices must at least have an
 * allocated integrity profile; but uninitialized is ok.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t,
						   bool match_all)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!blk_get_integrity(template_disk))
			goto no_integrity;
		if (!match_all && !blk_integrity_is_initialized(template_disk))
			continue; /* skip uninitialized profiles */
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}
/*
 * Register the mapped device for blk_integrity support if
 * the underlying devices have an integrity profile.  But all devices
 * may not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity profile).
 * Stacked DM devices force a 2 stage integrity profile validation:
 * 1 - during load, validate all initialized integrity profiles match
 * 2 - during resume, validate all integrity profiles match
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
	struct gendisk *template_disk = NULL;

	template_disk = dm_table_get_integrity_disk(t, false);
	if (!template_disk)
		return 0;

	if (!blk_integrity_is_initialized(dm_disk(md))) {
		t->integrity_supported = 1;
		return blk_integrity_register(dm_disk(md), NULL);
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_is_initialized(template_disk) &&
	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing initialized integrity profile */
	t->integrity_supported = 1;
	return 0;
}
/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_set_type(t);
	if (r) {
		DMERR("unable to set table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_prealloc_integrity(t, t->md);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}
/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
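/*
 * Illustrative walk (added, not from the original source): with 8 keys
 * per node and a two-level tree, a lookup scans at most the root's 8
 * keys to pick a child and then that leaf's 8 keys, i.e. at most
 * depth * KEYS_PER_NODE comparisons per dm_table_find_target() call.
 */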
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}
/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *uninitialized_var(ti);
	unsigned i = 0, num_devices = 0;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}
/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_stacking_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	return validate_hardware_logical_block_alignment(table, limits);
}
/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.  We're quite deep in the resume path but still
 * don't know if all devices (particularly DM devices this device
 * may be stacked on) have matching profiles.  Even if the profiles
 * don't match we have no way to fail (to resume) at this point.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (!blk_get_integrity(dm_disk(t->md)))
		return;

	template_disk = dm_table_get_integrity_disk(t, true);
	if (template_disk)
		blk_integrity_register(dm_disk(t->md),
				       blk_get_integrity(template_disk));
	else if (blk_integrity_is_initialized(dm_disk(t->md)))
		DMWARN("%s: device no longer has a valid integrity profile",
		       dm_device_name(t->md));
	else
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned flush = (*(unsigned *)data);
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->flush_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
			return true;
	}

	return false;
}
static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/* Ensure that all targets support discard_zeroes_data. */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (ti->discard_zeroes_data_unsupported)
			return false;
	}

	return true;
}
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_add_random(q);
}

static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Unless any target used by the table sets discards_supported,
	 * require at least one underlying device to support discards.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting discard selectively must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_discard_bios)
			continue;

		if (ti->discards_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
			return true;
	}

	return false;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	unsigned flush = 0;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_flush(t, REQ_FLUSH)) {
		flush |= REQ_FLUSH;
		if (dm_table_supports_flush(t, REQ_FUA))
			flush |= REQ_FUA;
	}
	blk_queue_flush(q, flush);

	if (!dm_table_discard_zeroes_data(t))
		q->limits.discard_zeroes_data = 0;

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;

	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	dm_table_set_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool.  Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
	 * visible to other CPUs because, once the flag is set, incoming bios
	 * are processed by request-based dm, which refers to the queue
	 * settings.
	 * Until the flag is set, bios are passed to bio-based dm and queued to
	 * md->deferred where queue settings are not needed yet.
	 * Those bios are passed to request-based dm at the resume time.
	 */
	if (dm_table_request_based(t))
		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);
enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev->bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}
int dm_table_any_busy_target(struct dm_table *t)
{
	unsigned i;
	struct dm_target *ti;

	for (i = 0; i < t->num_targets; i++) {
		ti = t->targets + i;
		if (ti->type->busy && ti->type->busy(ti))
			return 1;
	}

	return 0;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);
void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;
	unsigned long flags;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue) {
		spin_lock_irqsave(queue->queue_lock, flags);
		blk_run_queue_async(queue);
		spin_unlock_irqrestore(queue->queue_lock, flags);
	}
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);