/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * The table always has exactly one reference from either mapped_device->map
 * or hash_cell->new_map. This reference is not counted in table->holders.
 * A pair of dm_table_create/dm_table_destroy functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders. A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */
struct dm_table {
        struct mapped_device *md;
        atomic_t holders;
        unsigned type;

        /* btree table */
        unsigned int depth;
        unsigned int counts[MAX_DEPTH]; /* in nodes */
        sector_t *index[MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        unsigned discards_supported:1;
        unsigned integrity_supported:1;

        /*
         * Indicates the rw permissions for the new logical
         * device.  This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        fmode_t mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;

        struct dm_md_mempools *mempools;

        struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}

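/*
 * Worked example (illustrative, not part of the original source): on a
 * typical config with 64-bit sector_t and 64-byte cache lines,
 * KEYS_PER_NODE is 8 and CHILDREN_PER_NODE is 9, so int_log(100, 9)
 * is 3: 100 -> 12 -> 2 -> 1, i.e. three levels of fan-out 9 are enough
 * to cover 100 leaf nodes.
 */
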
/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

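/*
 * Layout sketch (illustrative): with KEYS_PER_NODE == 8, leaf node n
 * holds the "high" sectors of targets 8n..8n+7, and an internal node's
 * k'th key caches the highest sector reachable through its k'th child,
 * so a lookup descends one cache-line-sized node per level.
 */
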
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
        unsigned long size;
        void *addr;

        /*
         * Check that we're not going to overflow.
         */
        if (nmemb > (ULONG_MAX / elem_size))
                return NULL;

        size = nmemb * elem_size;
        addr = vmalloc(size);
        if (addr)
                memset(addr, 0, size);

        return addr;
}

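/*
 * Example (illustrative): a request such as dm_vcalloc(ULONG_MAX / 8, 16)
 * fails cleanly with NULL here, whereas computing the byte count first
 * would wrap around and hand vmalloc() a misleadingly small size while
 * callers index far beyond it.
 */
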
/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;
        int n = t->num_targets;

        /*
         * Allocate both the target array and offset array at once.
         * Append an empty entry to catch sectors beyond the end of
         * the device.
         */
        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        if (n) {
                memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
                memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
        }

        memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
        vfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}

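/*
 * Memory layout sketch (illustrative): one dm_vcalloc() backs both
 * arrays; n_highs occupies the front of the buffer and n_targets starts
 * num sector_t's in, so the two grow in lockstep and are freed together
 * via t->highs. Unused highs entries are pre-set to (sector_t) -1 by
 * the memset() of 0xff bytes.
 */
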
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);
        INIT_LIST_HEAD(&t->target_callbacks);
        atomic_set(&t->holders, 0);
        t->discards_supported = 1;

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                t = NULL;
                return -ENOMEM;
        }

        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}

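/*
 * Typical call sequence (illustrative, error handling elided):
 *
 *      struct dm_table *t;
 *
 *      r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 0, md);
 *      r = dm_table_add_target(t, "linear", 0, len, "8:16 0");
 *      r = dm_table_complete(t);
 *
 * Passing 0 for num_targets lets the table start at one leaf node's
 * worth of slots (KEYS_PER_NODE) and grow on demand.
 */
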
static void free_devices(struct list_head *devices)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("dm_table_destroy: dm_put_device call missing for %s",
                       dd->dm_dev.name);
                kfree(dd);
        }
}

void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        while (atomic_read(&t->holders))
                msleep(1);
        smp_mb();

        /* free the indexes */
        if (t->depth >= 2)
                vfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        vfree(t->highs);

        /* free the device list */
        if (t->devices.next != &t->devices)
                free_devices(&t->devices);

        dm_free_md_mempools(t->mempools);

        kfree(t);
}

void dm_table_get(struct dm_table *t)
{
        atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
        if (!t)
                return;

        smp_mb__before_atomic_dec();
        atomic_dec(&t->holders);
}

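/*
 * Usage sketch (illustrative): temporary users bracket access with
 * get/put, so the holders wait in dm_table_destroy() cannot free the
 * table out from under them:
 *
 *      dm_table_get(t);
 *      ... inspect or map through t ...
 *      dm_table_put(t);
 *
 * The smp_mb__before_atomic_dec() above orders those accesses before
 * the decrement that the destroyer is polling on.
 */
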
/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
        if (t->num_targets >= t->num_allocated)
                return alloc_targets(t, t->num_allocated * 2);

        return 0;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry (dd, l, list)
                if (dd->dm_dev.bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
                    struct mapped_device *md)
{
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;

        int r;

        BUG_ON(d->dm_dev.bdev);

        bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        r = bd_link_disk_holder(bdev, dm_disk(md));
        if (r) {
                blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
                return r;
        }

        d->dm_dev.bdev = bdev;
        return 0;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
        if (!d->dm_dev.bdev)
                return;

        bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
        blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
        d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks whether an area of a destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q;
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size =
            i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
        unsigned short logical_block_size_sectors =
            limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        /*
         * Some devices exist without request functions,
         * such as loop devices not yet bound to backing files.
         * Forbid the use of such devices.
         */
        q = bdev_get_queue(bdev);
        if (!q || !q->make_request_fn) {
                DMWARN("%s: %s is not yet initialised: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev_internal dd_new, dd_old;

        dd_new = dd_old = *dd;

        dd_new.dm_dev.mode |= new_mode;
        dd_new.dm_dev.bdev = NULL;

        r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
        if (r)
                return r;

        dd->dm_dev.mode |= new_mode;
        close_dev(&dd_old, md);

        return 0;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                              const char *path, fmode_t mode, struct dm_dev **result)
{
        int r;
        dev_t uninitialized_var(dev);
        struct dm_dev_internal *dd;
        unsigned int major, minor;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u", &major, &minor) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                /* convert the path to a device */
                struct block_device *bdev = lookup_bdev(path);

                if (IS_ERR(bdev))
                        return PTR_ERR(bdev);
                dev = bdev->bd_dev;
                bdput(bdev);
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                dd->dm_dev.mode = mode;
                dd->dm_dev.bdev = NULL;

                if ((r = open_dev(dd, dev, t->md))) {
                        kfree(dd);
                        return r;
                }

                format_dev_t(dd->dm_dev.name, dev);

                atomic_set(&dd->count, 0);
                list_add(&dd->list, &t->devices);

        } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        atomic_inc(&dd->count);

        *result = &dd->dm_dev;
        return 0;
}

int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                         sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (bdev_stack_limits(limits, bdev, start) < 0)
                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);

        /*
         * Check if merge fn is supported.
         * If not we'll force DM to use PAGE_SIZE or
         * smaller I/O, just to be safe.
         */

        if (q->merge_bvec_fn && !ti->type->merge)
                blk_limits_max_hw_sectors(limits,
                                          (unsigned int) (PAGE_SIZE >> 9));
        return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        return __table_get_device(ti->table, ti, path, mode, result);
}

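/*
 * Example (illustrative): a target ctr typically resolves its device
 * argument with
 *
 *      r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
 *
 * where argv[0] may be either a "major:minor" pair such as "8:16" or a
 * path such as "/dev/sdb"; both forms are parsed by __table_get_device()
 * above.
 */
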
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
                                                  dm_dev);

        if (atomic_dec_and_test(&dd->count)) {
                close_dev(dd, ti->table->md);
                list_del(&dd->list);
                kfree(dd);
        }
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
        char **argv;
        unsigned new_size;

        new_size = *array_size ? *array_size * 2 : 64;
        argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
        if (argv) {
                memcpy(argv, old_argv, *array_size * sizeof(*argv));
                *array_size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any back-quotes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}

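/*
 * Example (illustrative): given the writable string
 *
 *      "0 409600 linear /dev/sdb 8192"
 *
 * dm_split_args() yields argc == 5 with argv[0] == "0" ... argv[4] ==
 * "8192", rewriting the buffer in place. A backslash escapes the next
 * character, so "a\ b" becomes the single token "a b".
 */
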
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                     struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        /*
         * Check each entry in the table in turn.
         */
        while (i < dm_table_get_num_targets(table)) {
                ti = dm_table_get_target(table, i++);

                blk_set_default_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}

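/*
 * Worked example (illustrative): with a 4096-byte logical block size
 * (8 sectors), a first target of len 12 leaves next_target_start == 4
 * and remaining == 4, so the second target must be able to complete
 * those 4 sectors: if its own logical_block_size is also 4096, the
 * check 4 & (8 - 1) fires and the table is rejected.
 */
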
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if ((r = check_space(t)))
                return r;

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md),
                      type);
                return -EINVAL;
        }

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                r = -EINVAL;
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_requests)
                t->discards_supported = 0;

        return 0;

 bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}

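/*
 * Example (illustrative): the userspace table line
 *
 *      0 409600 linear 8:16 8192
 *
 * reaches this function as type "linear", start 0, len 409600 and
 * params "8:16 8192"; adjoin() then requires the next line to begin at
 * sector 409600, since the btree assumes a gap-free map.
 */
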
static int dm_table_set_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0;
        struct dm_target *tgt;
        struct dm_dev_internal *dd;
        struct list_head *devices;

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMWARN("Inconsistent table: different target types"
                               " can't be mixed up");
                        return -EINVAL;
                }
        }

        if (bio_based) {
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        /* Non-request-stackable devices can't be used for request-based dm */
        devices = dm_table_get_devices(t);
        list_for_each_entry(dd, devices, list) {
                if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
                        DMWARN("table load rejected: including"
                               " non-request-stackable devices");
                        return -EINVAL;
                }
        }

        /*
         * Request-based dm supports only tables that have a single target now.
         * To support multiple targets, request splitting support is needed,
         * and that needs lots of changes in the block-layer.
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
                DMWARN("Request-based dm doesn't support multiple targets yet");
                return -EINVAL;
        }

        t->type = DM_TYPE_REQUEST_BASED;

        return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

bool dm_table_request_based(struct dm_table *t)
{
        return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}

int dm_table_alloc_md_mempools(struct dm_table *t)
{
        unsigned type = dm_table_get_type(t);

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}

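/*
 * Worked example (illustrative, assuming KEYS_PER_NODE == 8): a table
 * with 100 targets has ceil(100 / 8) == 13 leaf nodes and depth
 * 1 + int_log(13, 9) == 3, so setup_indexes() builds two internal
 * levels above t->highs: 2 nodes over the 13 leaves, then a single
 * root node.
 */
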
/*
 * Get a disk whose integrity profile reflects the table's profile.
 * If %match_all is true, all devices' profiles must match.
 * If %match_all is false, all devices must at least have an
 * allocated integrity profile; but uninitialized is ok.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
                                                    bool match_all)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
        struct gendisk *prev_disk = NULL, *template_disk = NULL;

        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev.bdev->bd_disk;
                if (!blk_get_integrity(template_disk))
                        goto no_integrity;
                if (!match_all && !blk_integrity_is_initialized(template_disk))
                        continue; /* skip uninitialized profiles */
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
                prev_disk = template_disk;
        }

        return template_disk;

no_integrity:
        if (prev_disk)
                DMWARN("%s: integrity not set: %s and %s profile mismatch",
                       dm_device_name(t->md),
                       prev_disk->disk_name,
                       template_disk->disk_name);
        return NULL;
}

/*
 * Register the mapped device for blk_integrity support if
 * the underlying devices have an integrity profile.  But all devices
 * may not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity profile).
 * Stacked DM devices force a 2 stage integrity profile validation:
 * 1 - during load, validate all initialized integrity profiles match
 * 2 - during resume, validate all integrity profiles match
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
        struct gendisk *template_disk = NULL;

        template_disk = dm_table_get_integrity_disk(t, false);
        if (!template_disk)
                return 0;

        if (!blk_integrity_is_initialized(dm_disk(md))) {
                t->integrity_supported = 1;
                return blk_integrity_register(dm_disk(md), NULL);
        }

        /*
         * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
        if (blk_integrity_is_initialized(template_disk) &&
            blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
                       template_disk->disk_name);
                return 1;
        }

        /* Preserve existing initialized integrity profile */
        t->integrity_supported = 1;
        return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_set_type(t);
        if (r) {
                DMERR("unable to set table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_prealloc_integrity(t, t->md);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        /*
         * You can no longer call dm_table_event() from interrupt
         * context, use a bottom half instead.
         */
        BUG_ON(in_interrupt());

        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}

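/*
 * Lookup sketch (illustrative): each level scans one node's keys for
 * the first "high" sector >= the requested sector, then descends into
 * the matching child; after the leaf level, (KEYS_PER_NODE * n) + k is
 * the target's index. A depth-3 tree thus resolves any sector in at
 * most 3 * KEYS_PER_NODE comparisons.
 */
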
/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
                              struct queue_limits *limits)
{
        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        blk_set_default_limits(limits);

        while (i < dm_table_get_num_targets(table)) {
                blk_set_default_limits(&ti_limits);

                ti = dm_table_get_target(table, i++);

                if (!ti->type->iterate_devices)
                        goto combine_limits;

                /*
                 * Combine queue limits of all the devices this target uses.
                 */
                ti->type->iterate_devices(ti, dm_set_device_limits,
                                          &ti_limits);

                /* Set I/O hints portion of queue limits */
                if (ti->type->io_hints)
                        ti->type->io_hints(ti, &ti_limits);

                /*
                 * Check each device area is consistent with the target's
                 * overall queue limits.
                 */
                if (ti->type->iterate_devices(ti, device_area_is_invalid,
                                              &ti_limits))
                        return -EINVAL;

combine_limits:
                /*
                 * Merge this target's queue limits into the overall limits
                 * for the table.
                 */
                if (blk_stack_limits(limits, &ti_limits, 0) < 0)
                        DMWARN("%s: adding target device "
                               "(start sect %llu len %llu) "
                               "caused an alignment inconsistency",
                               dm_device_name(table->md),
                               (unsigned long long) ti->begin,
                               (unsigned long long) ti->len);
        }

        return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.  We're quite deep in the resume path but still
 * don't know if all devices (particularly DM devices this device
 * may be stacked on) have matching profiles.  Even if the profiles
 * don't match we have no way to fail (to resume) at this point.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
        struct gendisk *template_disk = NULL;

        if (!blk_get_integrity(dm_disk(t->md)))
                return;

        template_disk = dm_table_get_integrity_disk(t, true);
        if (!template_disk &&
            blk_integrity_is_initialized(dm_disk(t->md))) {
                DMWARN("%s: device no longer has a valid integrity profile",
                       dm_device_name(t->md));
                return;
        }
        blk_integrity_register(dm_disk(t->md),
                               blk_get_integrity(template_disk));
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
{
        /*
         * Copy table's limits to the DM device's request_queue
         */
        q->limits = *limits;

        if (!dm_table_supports_discards(t))
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

        dm_table_set_integrity(t);

        /*
         * QUEUE_FLAG_STACKABLE must be set after all queue settings are
         * visible to other CPUs because, once the flag is set, incoming bios
         * are processed by request-based dm, which refers to the queue
         * settings.
         * Until the flag is set, bios are passed to bio-based dm and queued
         * to md->deferred where queue settings are not needed yet.
         * Those bios are passed to request-based dm at the resume time.
         */
        smp_mb();
        if (dm_table_request_based(t))
                queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
        return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
        return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
        return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
        int i = t->num_targets;
        struct dm_target *ti = t->targets;

        while (i--) {
                if (postsuspend) {
                        if (ti->type->postsuspend)
                                ti->type->postsuspend(ti);
                } else if (ti->type->presuspend)
                        ti->type->presuspend(ti);

                ti++;
        }
}

void dm_table_presuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
        int i, r = 0;

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (!ti->type->preresume)
                        continue;

                r = ti->type->preresume(ti);
                if (r)
                        return r;
        }

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (ti->type->resume)
                        ti->type->resume(ti);
        }

        return 0;
}

void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
        list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_target_callbacks *cb;
        int r = 0;

        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
                char b[BDEVNAME_SIZE];

                if (likely(q))
                        r |= bdi_congested(&q->backing_dev_info, bdi_bits);
                else
                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
                                     dm_device_name(t->md),
                                     bdevname(dd->dm_dev.bdev, b));
        }

        list_for_each_entry(cb, &t->target_callbacks, list)
                if (cb->congested_fn)
                        r |= cb->congested_fn(cb, bdi_bits);

        return r;
}

int dm_table_any_busy_target(struct dm_table *t)
{
        unsigned i;
        struct dm_target *ti;

        for (i = 0; i < t->num_targets; i++) {
                ti = t->targets + i;
                if (ti->type->busy && ti->type->busy(ti))
                        return 1;
        }

        return 0;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
        return t->md;
}

static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_discard(q);
}

bool dm_table_supports_discards(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        if (!t->discards_supported)
                return 0;

        /*
         * Unless any target used by the table set discards_supported,
         * require at least one underlying device to support discards.
         * t->devices includes internal dm devices such as mirror logs
         * so we need to use iterate_devices here, which targets
         * supporting discard must provide.
         */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (ti->discards_supported)
                        return 1;

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_discard_capable, NULL))
                        return 1;
        }

        return 0;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);