dm: fix to_sector() for 32bit
include/linux/device-mapper.h (linux-stable.git)
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */
#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;
/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
	DM_TYPE_NVME_BIO_BASED	 = 4,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
union map_info {
	void *ptr;
};
/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);
/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);
/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
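/*
 * A minimal sketch of a bio-based map function, assuming a hypothetical
 * private context struct example_c { struct dm_dev *dev; sector_t start; }
 * (not part of this header). It remaps the bio onto the underlying device
 * and returns 1 (DM_MAPIO_REMAPPED, defined later in this file):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		bio->bi_iter.bi_sector =
 *			ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */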
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
				   struct blk_zone *zones,
				   unsigned int *nr_zones,
				   gfp_t gfp_mask);
/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device. State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);
/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
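/*
 * A sketch of the callout pattern for the WRITE_SAME question above; the
 * helper name is illustrative, not part of this header. The callout
 * returns non-zero for a device that is NOT capable, so a caller treats
 * ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)
 * returning zero as "every underlying device supports WRITE SAME":
 *
 *	static int device_not_write_same_capable(struct dm_target *ti,
 *						 struct dm_dev *dev,
 *						 sector_t start, sector_t len,
 *						 void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && !q->limits.max_write_same_sectors;
 *	}
 */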
typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);
/*
 * Returns:
 * 0: The target can handle the next I/O immediately.
 * 1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);
/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};
dev_t dm_get_dev_t(const char *path);
/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
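/*
 * A minimal constructor sketch using dm_get_device, again assuming the
 * hypothetical struct example_c from above (argument layout and error
 * strings are illustrative):
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_c *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec)
 *			return -ENOMEM;
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 * The matching destructor would call dm_put_device(ti, ec->dev) and
 * kfree(ec).
 */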
/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
#ifdef CONFIG_BLK_DEV_ZONED
	dm_report_zones_fn report_zones;
#endif
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;

	/* For internal device-mapper use. */
	struct list_head list;
};
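/*
 * A sketch of how a module typically fills this in and registers it,
 * using the hypothetical example_* functions from the sketches above
 * (dm_register_target is declared later in this header):
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 */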
/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)
/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)
/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;
	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;
};
/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};
/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);
/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
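/*
 * A sketch of the usual parsing pattern inside a constructor; the bounds
 * and error strings are illustrative, not taken from any real target:
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 16, "Invalid number of feature args"},
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *devname = dm_shift_arg(&as);
 *	unsigned num_features;
 *	int r;
 *
 *	r = dm_read_arg_group(&_args[0], &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 */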
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);
/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);
/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);
/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);
/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
			  struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);
/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);
/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (supporting both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
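/*
 * The create/add/complete/destroy sequence, sketched with illustrative
 * values (a one-target "linear" table of 1024 sectors; in practice these
 * calls are made from device-mapper core and dm-ioctl):
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sda 0";
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, 1024, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */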
/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);
/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);
/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);
/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);
/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"
#define DM_RATELIMIT(pr_func, fmt, ...)					\
do {									\
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&rs))						\
		pr_func(DM_FMT(fmt), ##__VA_ARGS__);			\
} while (0)

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
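/*
 * DM_FMT pastes in DM_MSG_PREFIX, so each source file must define that
 * macro before using the logging helpers above; a sketch for the
 * hypothetical "example" target:
 *
 *	#define DM_MSG_PREFIX "example"
 *	...
 *	DMERR("%s: device lookup failed", dm_device_name(md));
 */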
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
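/*
 * DMEMIT expects the local variables sz, result and maxlen that a status
 * callback has in scope; a sketch using the hypothetical example_c
 * context from above:
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		if (type == STATUSTYPE_TABLE)
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *	}
 */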
/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3
/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4
#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)
/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)
/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
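/*
 * Worked example: with n = 1000 and sz = 512,
 *
 *	dm_div_up(1000, 512)   = (1000 + 511) / 512 = 2
 *	dm_round_up(1000, 512) = 2 * 512            = 1024
 */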
#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))
/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}
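/*
 * Note on the 32-bit fix named in this blob's commit title: to_sector()
 * takes an unsigned long long so that byte counts of 4 GiB and above are
 * shifted at full width on 32-bit builds, where unsigned long is only 32
 * bits, rather than being truncated before the shift. With SECTOR_SHIFT
 * == 9:
 *
 *	to_sector(4096)       == 8
 *	to_sector(5ULL << 32) == 41943040
 *	                         (a 32-bit unsigned long parameter would
 *	                          have truncated 5ULL << 32 to 0 first)
 */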
#endif	/* _LINUX_DEVICE_MAPPER_H */