#include <linux/err.h>	/* IS_ERR()/ERR_PTR(), used below */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}
void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 * @mask:	flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map = RB_ROOT;
	rwlock_init(&tree->lock);
}
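/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a user of this interface embeds an extent_map_tree in its own per-inode
 * structure and initializes it once before doing any lookups or insertions.
 * The containing struct and function names below are hypothetical.
 */
struct hypothetical_inode_info {
	struct extent_map_tree extent_tree;
};

static void __maybe_unused init_extent_tree_sketch(struct hypothetical_inode_info *info)
{
	extent_map_tree_init(&info->extent_tree, GFP_NOFS);
}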
/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
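/*
 * Usage sketch (editor's illustration, not part of the original file):
 * alloc_extent_map() hands back a map with a reference count of one, and
 * that reference must eventually be dropped with free_extent_map().
 * GFP_NOFS is only a plausible allocation mask here.
 */
static void __maybe_unused extent_map_refcount_sketch(void)
{
	struct extent_map *em;

	em = alloc_extent_map(GFP_NOFS);
	if (!em || IS_ERR(em))
		return;

	/* ... fill in em->start, em->len, em->block_start, etc. ... */

	/* drop the initial reference; the map is freed once refs hits zero */
	free_extent_map(em);
}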
/*
 * insert @node into the red-black tree keyed by extent start offset;
 * returns the overlapping node if one exists, or NULL on success
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
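/*
 * Worked example (editor's illustration): with prev covering [0, 4096) at
 * block_start 8192 (block_len 4096) and next covering [4096, 8192) at
 * block_start 12288, extent_map_end(prev) == next->start and
 * extent_map_block_end(prev) == next->block_start, so the two maps are
 * mergable as long as flags and bdev match and prev is neither pinned
 * nor compressed.
 */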
/*
 * clear the PINNED flag on the mapping that starts at @start and try to
 * merge it with its neighbours in the tree
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
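/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller fills in a freshly allocated map and inserts it while holding
 * the tree's write lock; on success the tree takes its own reference, so
 * the caller can drop the local one.  All field values are hypothetical.
 */
static int __maybe_unused add_extent_mapping_sketch(struct extent_map_tree *tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em || IS_ERR(em))
		return -ENOMEM;

	em->start = 0;
	em->len = 4096;
	em->block_start = 8192;
	em->block_len = 4096;
	em->bdev = NULL;

	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	write_unlock(&tree->lock);

	/* drop the caller's reference; the tree keeps its own on success */
	free_extent_map(em);
	return ret;
}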
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
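/*
 * Usage sketch (editor's illustration, not part of the original file):
 * lookup_extent_mapping() takes a reference on whatever it returns, so the
 * caller must drop it with free_extent_map() when finished.  The read-side
 * locking here mirrors the write locking used in unpin_extent_cache().
 */
static u64 __maybe_unused lookup_extent_start_sketch(struct extent_map_tree *tree,
						     u64 start, u64 len)
{
	struct extent_map *em;
	u64 found = (u64)-1;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);

	if (em && !IS_ERR(em)) {
		found = em->start;
		free_extent_map(em);	/* drop the lookup reference */
	}
	return found;
}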
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);

found:
	atomic_inc(&em->refs);
out:
	return em;
}
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
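/*
 * Usage sketch (editor's illustration, not part of the original file):
 * since remove_extent_mapping() drops no references, a caller that wants
 * to discard a mapping entirely removes it under the write lock and then
 * releases the reference the tree was holding.
 */
static void __maybe_unused drop_extent_mapping_sketch(struct extent_map_tree *tree,
						      struct extent_map *em)
{
	write_lock(&tree->lock);
	remove_extent_mapping(tree, em);
	write_unlock(&tree->lock);

	/* drop the reference that add_extent_mapping() gave the tree */
	free_extent_map(em);
}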