drivers/md/dm-exception-store.h

/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
 *
 * Device-mapper snapshot exception store.
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_EXCEPTION_STORE
#define _LINUX_DM_EXCEPTION_STORE

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * The snapshot code deals with largish chunks of the disk at a
 * time. Typically 32k - 512k.
 */
typedef sector_t chunk_t;

/*
 * An exception is used where an old chunk of data has been
 * replaced by a new one.
 * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number
 * of chunks that follow contiguously. Remaining bits hold the number of the
 * chunk within the device.
 */
struct dm_exception {
	struct list_head hash_list;

	chunk_t old_chunk;
	chunk_t new_chunk;
};

/*
 * Abstraction to handle the meta/layout of exception stores (the
 * COW device).
 */
struct dm_exception_store;
struct dm_exception_store_type {
	const char *name;
	struct module *module;

	int (*ctr) (struct dm_exception_store *store, char *options);

	/*
	 * Destroys this object when you've finished with it.
	 */
	void (*dtr) (struct dm_exception_store *store);

	/*
	 * The target shouldn't read the COW device until this is
	 * called. As exceptions are read from the COW, they are
	 * reported back via the callback.
	 */
	int (*read_metadata) (struct dm_exception_store *store,
			      int (*callback)(void *callback_context,
					      chunk_t old, chunk_t new),
			      void *callback_context);

	/*
	 * Find somewhere to store the next exception.
	 */
	int (*prepare_exception) (struct dm_exception_store *store,
				  struct dm_exception *e);

	/*
	 * Update the metadata with this exception.
	 */
	void (*commit_exception) (struct dm_exception_store *store,
				  struct dm_exception *e, int valid,
				  void (*callback) (void *, int success),
				  void *callback_context);

	/*
	 * Returns 0 if the exception store is empty.
	 *
	 * If there are exceptions still to be merged, sets
	 * *last_old_chunk and *last_new_chunk to the most recent
	 * still-to-be-merged chunk and returns the number of
	 * consecutive previous ones.
	 */
	int (*prepare_merge) (struct dm_exception_store *store,
			      chunk_t *last_old_chunk, chunk_t *last_new_chunk);

	/*
	 * Clear the last n exceptions.
	 * nr_merged must be <= the value returned by prepare_merge.
	 */
	int (*commit_merge) (struct dm_exception_store *store, int nr_merged);

	/*
	 * The snapshot is invalid, note this in the metadata.
	 */
	void (*drop_snapshot) (struct dm_exception_store *store);

	unsigned (*status) (struct dm_exception_store *store,
			    status_type_t status, char *result,
			    unsigned maxlen);

	/*
	 * Return how full the snapshot is.
	 */
	void (*usage) (struct dm_exception_store *store,
		       sector_t *total_sectors, sector_t *sectors_allocated,
		       sector_t *metadata_sectors);

	/* For internal device-mapper use only. */
	struct list_head list;
};

struct dm_snapshot;

struct dm_exception_store {
	struct dm_exception_store_type *type;
	struct dm_snapshot *snap;

	/* Size of data blocks saved - must be a power of 2 */
	unsigned chunk_size;
	unsigned chunk_mask;
	unsigned chunk_shift;

	void *context;

	bool userspace_supports_overflow;
};
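
/*
 * Editor's note, not part of the original header: chunk_size is the
 * chunk size in 512-byte sectors and must be a power of two, so the
 * other two fields are typically simple derivations of it,
 * chunk_mask == chunk_size - 1 and chunk_shift == log2(chunk_size).
 * For example, a chunk_size of 16 sectors (8KiB) would give
 * chunk_mask 15 and chunk_shift 4; see
 * dm_exception_store_set_chunk_size() below, which fills these in.
 */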

/*
 * Obtain the origin or cow device used by a given snapshot.
 */
struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);

/*
 * Functions to manipulate consecutive chunks
 */
# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
# define DM_CHUNK_CONSECUTIVE_BITS 8
# define DM_CHUNK_NUMBER_BITS 56

static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}

static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
{
	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}

static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
{
	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);

	BUG_ON(!dm_consecutive_chunk_count(e));
}

static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
{
	BUG_ON(!dm_consecutive_chunk_count(e));

	e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
}

# else
# define DM_CHUNK_CONSECUTIVE_BITS 0

static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk;
}

static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
{
	return 0;
}

static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
{
}

static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
{
}

# endif
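
/*
 * Illustrative helper (editor's sketch, not part of the original
 * header): shows how the consecutive-chunk encoding above is read
 * back. For a 64-bit chunk_t, an exception whose new_chunk is
 * ((chunk_t)3 << 56) | 1000 names chunk 1000 on the COW device with
 * three further chunks stored contiguously after it, so first_chunk
 * comes back as 1000 and nr_following as 3.
 */
static inline void dm_exception_unpack_example(struct dm_exception *e,
					       chunk_t *first_chunk,
					       unsigned *nr_following)
{
	*first_chunk = dm_chunk_number(e->new_chunk);
	*nr_following = dm_consecutive_chunk_count(e);
}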

/*
 * Return the number of sectors in the device.
 */
static inline sector_t get_dev_size(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}

static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
				      sector_t sector)
{
	return sector >> store->chunk_shift;
}
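
/*
 * Editor's sketch, not part of the original header: the inverse of
 * sector_to_chunk() under the same power-of-two assumption. With a
 * chunk_size of 16 sectors (chunk_shift == 4), sector 35 maps to
 * chunk 2, whose first sector this helper gives back as 32.
 */
static inline sector_t example_chunk_to_sector(struct dm_exception_store *store,
					       chunk_t chunk)
{
	return (sector_t)chunk << store->chunk_shift;
}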

int dm_exception_store_type_register(struct dm_exception_store_type *type);
int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
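
/*
 * Illustrative usage (editor's sketch, not part of the original
 * header): an exception store backend fills in a
 * dm_exception_store_type and registers it from its module init,
 * along the lines of
 *
 *	static struct dm_exception_store_type _example_type = {
 *		.name   = "example",
 *		.module = THIS_MODULE,
 *		.ctr    = example_ctr,
 *		.dtr    = example_dtr,
 *		...
 *	};
 *
 *	r = dm_exception_store_type_register(&_example_type);
 *
 * pairing it with dm_exception_store_type_unregister() on exit. The
 * "example" name and the example_ctr/example_dtr callbacks here are
 * hypothetical.
 */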

int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
				      unsigned chunk_size,
				      char **error);

int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
			      struct dm_snapshot *snap,
			      unsigned *args_used,
			      struct dm_exception_store **store);
void dm_exception_store_destroy(struct dm_exception_store *store);
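
/*
 * Illustrative usage (editor's sketch, not part of the original
 * header): the snapshot target creates its store while parsing the
 * constructor arguments and tears it down in the destructor, roughly
 *
 *	r = dm_exception_store_create(ti, argc, argv, snap,
 *				      &args_used, &store);
 *	...
 *	dm_exception_store_destroy(store);
 *
 * with args_used reporting how many of argc/argv the store consumed.
 */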

int dm_exception_store_init(void);
void dm_exception_store_exit(void);

/*
 * Two exception store implementations.
 */
int dm_persistent_snapshot_init(void);
void dm_persistent_snapshot_exit(void);

int dm_transient_snapshot_init(void);
void dm_transient_snapshot_exit(void);

#endif /* _LINUX_DM_EXCEPTION_STORE */