#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}
void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 * @mask:	flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	spin_lock_init(&tree->lock);
}
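/*
 * A minimal sketch (not part of the original file) of the per-inode use
 * described above; the my_inode_info structure and my_inode_init() name
 * are hypothetical stand-ins for the caller's own inode bookkeeping.
 */
struct my_inode_info {
	struct extent_map_tree extent_tree;
	/* ... other per-inode state ... */
};

static void __maybe_unused my_inode_init(struct my_inode_info *mi)
{
	extent_map_tree_init(&mi->extent_tree, GFP_NOFS);
}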
/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
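/*
 * A minimal lifecycle sketch (not part of the original file): allocate a
 * map, insert it under the tree lock, then drop the local reference.  The
 * example_insert_hole() name, the offsets and the GFP flags are
 * illustrative assumptions only.
 */
static int __maybe_unused example_insert_hole(struct extent_map_tree *tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em || IS_ERR(em))
		return -ENOMEM;

	em->start = 0;				/* byte offset in the file */
	em->len = 4096;				/* length of the mapping */
	em->block_start = EXTENT_MAP_HOLE;	/* no disk blocks behind it */
	em->block_len = em->len;
	em->bdev = NULL;

	spin_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	spin_unlock(&tree->lock);

	/* on success the tree holds its own reference; drop ours either way */
	free_extent_map(em);
	return ret;
}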
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;	/* existing extent overlaps offset */
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
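/*
 * Note on the helper above: when no extent contains @offset, the search
 * walks *prev_ret forward to the first extent that ends beyond @offset
 * and *next_ret backward to the last extent that starts at or before it,
 * so lookup_extent_mapping() can test both neighbors against its range
 * without a second tree walk.
 */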
/*
 * look for an offset in the tree, and if it can't be found, return
 * the first offset we can find smaller than 'offset'.
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev, NULL);
	if (!ret)
		ret = prev;
	return ret;
}
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
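/*
 * A worked example with illustrative numbers (not from the original
 * file): prev = [start 0, len 4096, block_start 8192, block_len 4096]
 * and next = [start 4096, len 4096, block_start 12288] pass every test
 * above, because extent_map_end(prev) == 4096 == next->start and
 * extent_map_block_end(prev) == 12288 == next->block_start; the pair
 * merges into [start 0, len 8192, block_start 8192].
 */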
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	/* try to merge em backward with the extent that ends at em->start */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	/* and forward with the extent that starts at the end of em */
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
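/*
 * Wrap example (illustrative numbers): with start = (u64)-4096 and
 * len = 8192, start + len overflows to 4096, which compares less than
 * start, so range_end() clamps the result to (u64)-1 instead of
 * returning a small wrapped offset.
 */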
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	assert_spin_locked(&tree->lock);
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
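/*
 * A minimal lookup sketch (not part of the original file): the caller
 * holds tree->lock across the search and must drop the reference the
 * lookup took.  The example_lookup() name and the range handling are
 * illustrative assumptions.
 */
static int __maybe_unused example_lookup(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;

	spin_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	spin_unlock(&tree->lock);

	if (IS_ERR(em))
		return PTR_ERR(em);
	if (!em)
		return -ENOENT;

	/* ... examine em->start, em->len, em->block_start here ... */

	free_extent_map(em);	/* drop the reference the lookup took */
	return 0;
}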
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	assert_spin_locked(&tree->lock);
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
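/*
 * A removal sketch (not part of the original file): because
 * remove_extent_mapping() drops no references, a caller that wants the
 * map gone entirely pairs it with free_extent_map() to release the
 * reference the tree held.  The example_remove() name is hypothetical.
 */
static void __maybe_unused example_remove(struct extent_map_tree *tree,
					  struct extent_map *em)
{
	spin_lock(&tree->lock);
	remove_extent_mapping(tree, em);
	spin_unlock(&tree->lock);
	free_extent_map(em);	/* drop the reference the tree owned */
}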