/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

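/*
 * Each btree node fills one cacheline.  On a typical configuration
 * (64-byte L1_CACHE_BYTES, 8-byte sector_t) that gives 8 keys and
 * 9 children per node; the exact values depend on the architecture.
 */
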
/*
 * The table has always exactly one reference from either mapped_device->map
 * or hash_cell->new_map. This reference is not counted in table->holders.
 * A pair of dm_create_table/dm_destroy_table functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders. A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */

struct dm_table {
        struct mapped_device *md;
        atomic_t holders;
        unsigned type;

        /* btree table */
        unsigned int depth;
        unsigned int counts[MAX_DEPTH]; /* in nodes */
        sector_t *index[MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        unsigned discards_supported:1;

        /*
         * Indicates the rw permissions for the new logical
         * device.  This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        fmode_t mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;

        struct dm_md_mempools *mempools;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) - 1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
        unsigned long size;
        void *addr;

        /*
         * Check that we're not going to overflow.
         */
        if (nmemb > (ULONG_MAX / elem_size))
                return NULL;

        size = nmemb * elem_size;
        addr = vmalloc(size);
        if (addr)
                memset(addr, 0, size);

        return addr;
}

/*
 * The highs and targets arrays are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;
        int n = t->num_targets;

        /*
         * Allocate both the target array and offset array at once.
         * Append an empty entry to catch sectors beyond the end of
         * the device.
         */
        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        if (n) {
                memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
                memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
        }

        memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
        vfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}

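/*
 * Allocate an empty table.  The target count is rounded up to fill a
 * whole btree leaf node; mode records the rw permissions requested for
 * the new logical device.
 */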
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);
        atomic_set(&t->holders, 0);
        t->discards_supported = 1;

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                t = NULL;
                return -ENOMEM;
        }

        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}

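/*
 * Called on table destruction for any devices still on the list,
 * i.e. devices a target forgot to release with dm_put_device().
 */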
static void free_devices(struct list_head *devices)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("dm_table_destroy: dm_put_device call missing for %s",
                       dd->dm_dev.name);
                kfree(dd);
        }
}

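/*
 * Tear the table down: wait for temporary holders to drop away, then
 * free the btree indexes, the targets (calling each target's dtr),
 * the device list and the mempools.
 */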
void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        while (atomic_read(&t->holders))
                msleep(1);
        smp_mb();

        /* free the indexes */
        if (t->depth >= 2)
                vfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        vfree(t->highs);

        /* free the device list */
        if (t->devices.next != &t->devices)
                free_devices(&t->devices);

        dm_free_md_mempools(t->mempools);

        kfree(t);
}

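/* Temporary references to a live table are counted in t->holders. */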
void dm_table_get(struct dm_table *t)
{
        atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
        if (!t)
                return;

        smp_mb__before_atomic_dec();
        atomic_dec(&t->holders);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
        if (t->num_targets >= t->num_allocated)
                return alloc_targets(t, t->num_allocated * 2);

        return 0;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry (dd, l, list)
                if (dd->dm_dev.bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
                    struct mapped_device *md)
{
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;

        int r;

        BUG_ON(d->dm_dev.bdev);

        bdev = open_by_devnum(dev, d->dm_dev.mode);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
        if (r)
                blkdev_put(bdev, d->dm_dev.mode);
        else
                d->dm_dev.bdev = bdev;
        return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
        if (!d->dm_dev.bdev)
                return;

        bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
        blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
        d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size =
                i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev_internal dd_new, dd_old;

        dd_new = dd_old = *dd;

        dd_new.dm_dev.mode |= new_mode;
        dd_new.dm_dev.bdev = NULL;

        r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
        if (r)
                return r;

        dd->dm_dev.mode |= new_mode;
        close_dev(&dd_old, md);

        return 0;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                              const char *path, fmode_t mode, struct dm_dev **result)
{
        int r;
        dev_t uninitialized_var(dev);
        struct dm_dev_internal *dd;
        unsigned int major, minor;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u", &major, &minor) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                /* convert the path to a device */
                struct block_device *bdev = lookup_bdev(path);

                if (IS_ERR(bdev))
                        return PTR_ERR(bdev);
                dev = bdev->bd_dev;
                bdput(bdev);
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                dd->dm_dev.mode = mode;
                dd->dm_dev.bdev = NULL;

                if ((r = open_dev(dd, dev, t->md))) {
                        kfree(dd);
                        return r;
                }

                format_dev_t(dd->dm_dev.name, dev);

                atomic_set(&dd->count, 0);
                list_add(&dd->list, &t->devices);

        } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        atomic_inc(&dd->count);

        *result = &dd->dm_dev;
        return 0;
}

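/*
 * iterate_devices callback: stack the queue limits of one underlying
 * device (starting at 'start') into the queue_limits passed via 'data'.
 */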
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                         sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (bdev_stack_limits(limits, bdev, start) < 0)
                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);

        /*
         * Check if merge fn is supported.
         * If not we'll force DM to use PAGE_SIZE or
         * smaller I/O, just to be safe.
         */
        if (q->merge_bvec_fn && !ti->type->merge)
                blk_limits_max_hw_sectors(limits,
                                          (unsigned int) (PAGE_SIZE >> 9));
        return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        return __table_get_device(ti->table, ti, path, mode, result);
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
                                                  dm_dev);

        if (atomic_dec_and_test(&dd->count)) {
                close_dev(dd, ti->table->md);
                list_del(&dd->list);
                kfree(dd);
        }
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
        char **argv;
        unsigned new_size;

        new_size = *array_size ? *array_size * 2 : 64;
        argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
        if (argv) {
                memcpy(argv, old_argv, *array_size * sizeof(*argv));
                *array_size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any back-quotes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                     struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        /*
         * Check each entry in the table in turn.
         */
        while (i < dm_table_get_num_targets(table)) {
                ti = dm_table_get_target(table, i++);

                blk_set_default_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}

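/*
 * Add a target to the end of the table: the new target must adjoin the
 * previous one, and its constructor is called with the split-up
 * parameter string.
 */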
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if ((r = check_space(t)))
                return r;

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md),
                      type);
                return -EINVAL;
        }

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                r = -EINVAL;
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_requests)
                t->discards_supported = 0;

        return 0;

 bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}

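/*
 * Decide whether the table is bio-based or request-based.  Mixing the
 * two target types in one table is rejected, and request-based tables
 * are currently limited to a single target on stackable devices.
 */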
static int dm_table_set_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0;
        struct dm_target *tgt;
        struct dm_dev_internal *dd;
        struct list_head *devices;

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMWARN("Inconsistent table: different target types"
                               " can't be mixed up");
                        return -EINVAL;
                }
        }

        if (bio_based) {
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        /* Non-request-stackable devices can't be used for request-based dm */
        devices = dm_table_get_devices(t);
        list_for_each_entry(dd, devices, list) {
                if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
                        DMWARN("table load rejected: including"
                               " non-request-stackable devices");
                        return -EINVAL;
                }
        }

        /*
         * Request-based dm supports only tables that have a single target now.
         * To support multiple targets, request splitting support is needed,
         * and that needs lots of changes in the block-layer.
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
                DMWARN("Request-based dm doesn't support multiple targets yet");
                return -EINVAL;
        }

        t->type = DM_TYPE_REQUEST_BASED;

        return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

bool dm_table_request_based(struct dm_table *t)
{
        return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}

int dm_table_alloc_md_mempools(struct dm_table *t)
{
        unsigned type = dm_table_get_type(t);

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        t->mempools = dm_alloc_md_mempools(type);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

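/*
 * Allocate the internal btree nodes in one block and fill them in
 * bottom-up from the leaf level.
 */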
static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}

/*
 * Register the mapped device for blk_integrity support if
 * the underlying devices support it.
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, devices, list)
                if (bdev_get_integrity(dd->dm_dev.bdev))
                        return blk_integrity_register(dm_disk(md), NULL);

        return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_set_type(t);
        if (r) {
                DMERR("unable to set table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_prealloc_integrity(t, t->md);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        /*
         * You can no longer call dm_table_event() from interrupt
         * context, use a bottom half instead.
         */
        BUG_ON(in_interrupt());

        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}

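/* The table size is one past the highest sector of the last target. */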
sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
                              struct queue_limits *limits)
{
        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        blk_set_default_limits(limits);

        while (i < dm_table_get_num_targets(table)) {
                blk_set_default_limits(&ti_limits);

                ti = dm_table_get_target(table, i++);

                if (!ti->type->iterate_devices)
                        goto combine_limits;

                /*
                 * Combine queue limits of all the devices this target uses.
                 */
                ti->type->iterate_devices(ti, dm_set_device_limits,
                                          &ti_limits);

                /* Set I/O hints portion of queue limits */
                if (ti->type->io_hints)
                        ti->type->io_hints(ti, &ti_limits);

                /*
                 * Check each device area is consistent with the target's
                 * overall queue limits.
                 */
                if (ti->type->iterate_devices(ti, device_area_is_invalid,
                                              &ti_limits))
                        return -EINVAL;

combine_limits:
                /*
                 * Merge this target's queue limits into the overall limits
                 * for the table.
                 */
                if (blk_stack_limits(limits, &ti_limits, 0) < 0)
                        DMWARN("%s: adding target device "
                               "(start sect %llu len %llu) "
                               "caused an alignment inconsistency",
                               dm_device_name(table->md),
                               (unsigned long long) ti->begin,
                               (unsigned long long) ti->len);
        }

        return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *prev = NULL, *dd = NULL;

        if (!blk_get_integrity(dm_disk(t->md)))
                return;

        list_for_each_entry(dd, devices, list) {
                if (prev &&
                    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
                                          dd->dm_dev.bdev->bd_disk) < 0) {
                        DMWARN("%s: integrity not set: %s and %s mismatch",
                               dm_device_name(t->md),
                               prev->dm_dev.bdev->bd_disk->disk_name,
                               dd->dm_dev.bdev->bd_disk->disk_name);
                        goto no_integrity;
                }
                prev = dd;
        }

        if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
                goto no_integrity;

        blk_integrity_register(dm_disk(t->md),
                               bdev_get_integrity(prev->dm_dev.bdev));

        return;

no_integrity:
        blk_integrity_register(dm_disk(t->md), NULL);

        return;
}

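/*
 * Apply the computed queue_limits to the mapped device's request_queue
 * and set the discard, integrity and stackable flags to match the table.
 */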
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
{
        /*
         * Copy table's limits to the DM device's request_queue
         */
        q->limits = *limits;

        if (!dm_table_supports_discards(t))
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

        dm_table_set_integrity(t);

        /*
         * QUEUE_FLAG_STACKABLE must be set after all queue settings are
         * visible to other CPUs because, once the flag is set, incoming bios
         * are processed by request-based dm, which refers to the queue
         * settings.
         * Until the flag is set, bios are passed to bio-based dm and queued
         * to md->deferred where queue settings are not needed yet.
         * Those bios are passed to request-based dm at resume time.
         */
        smp_mb();
        if (dm_table_request_based(t))
                queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
        return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
        return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
        return t->mode;
}

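/* Call the pre- or post-suspend hook of every target in the table. */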
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
        int i = t->num_targets;
        struct dm_target *ti = t->targets;

        while (i--) {
                if (postsuspend) {
                        if (ti->type->postsuspend)
                                ti->type->postsuspend(ti);
                } else if (ti->type->presuspend)
                        ti->type->presuspend(ti);

                ti++;
        }
}

void dm_table_presuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 1);
}

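/*
 * Resume the table: every target's preresume must succeed before any
 * resume hook is called.
 */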
int dm_table_resume_targets(struct dm_table *t)
{
        int i, r = 0;

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (!ti->type->preresume)
                        continue;

                r = ti->type->preresume(ti);
                if (r)
                        return r;
        }

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (ti->type->resume)
                        ti->type->resume(ti);
        }

        return 0;
}

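/*
 * Report whether any underlying device is congested, OR-ing together
 * the bdi congestion state of each device in the table.
 */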
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);
        int r = 0;

        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
                char b[BDEVNAME_SIZE];

                if (likely(q))
                        r |= bdi_congested(&q->backing_dev_info, bdi_bits);
                else
                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
                                     dm_device_name(t->md),
                                     bdevname(dd->dm_dev.bdev, b));
        }

        return r;
}

int dm_table_any_busy_target(struct dm_table *t)
{
        unsigned i;
        struct dm_target *ti;

        for (i = 0; i < t->num_targets; i++) {
                ti = t->targets + i;
                if (ti->type->busy && ti->type->busy(ti))
                        return 1;
        }

        return 0;
}

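/* Kick (unplug) the request queue of every device used by the table. */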
void dm_table_unplug_all(struct dm_table *t)
{
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);

        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
                char b[BDEVNAME_SIZE];

                if (likely(q))
                        blk_unplug(q);
                else
                        DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
                                     dm_device_name(t->md),
                                     bdevname(dd->dm_dev.bdev, b));
        }
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
        return t->md;
}

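/*
 * Discard support: a table advertises discards only if every target
 * allows them (t->discards_supported) and at least one underlying
 * device supports discards.
 */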
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_discard(q);
}

bool dm_table_supports_discards(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        if (!t->discards_supported)
                return 0;

        /*
         * Ensure that at least one underlying device supports discards.
         * t->devices includes internal dm devices such as mirror logs
         * so we need to use iterate_devices here, which targets
         * supporting discard must provide.
         */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_discard_capable, NULL))
                        return 1;
        }

        return 0;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);