[linux-2.6/mini2440.git] fs/btrfs/extent_io.c

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->buffer.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

static void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}
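
/*
 * insert 'node' into the rb-tree keyed by 'offset'.  If an existing entry
 * already covers 'offset', that node is returned instead and nothing is
 * inserted.
 */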
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
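
/*
 * search the tree for a node that contains 'offset'.  If there is no
 * exact match, *prev_ret is set to the first entry that ends at or after
 * 'offset' and *next_ret to the last entry that starts at or before it.
 */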
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
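
/*
 * insert an extent_buffer into the buffer tree, keyed by its start
 * offset.  Returns the buffer already present at 'offset', if any.
 */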
static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
						u64 offset, struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_buffer *eb;

	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);

		if (offset < eb->start)
			p = &(*p)->rb_left;
		else if (offset > eb->start)
			p = &(*p)->rb_right;
		else
			return eb;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
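
/* find the extent_buffer that starts at 'offset', or NULL if none does */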
static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;

	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
		else if (offset > eb->start)
			n = n->rb_right;
		else
			return eb;
	}
	return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}
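
/*
 * give the tree's owner a chance to do its own accounting when bits are
 * set or cleared on an extent state
 */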
static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			clear_state_cb(tree, state, state->state);
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits,
					wake, delete);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	set |= clear_state_bit(tree, state, bits, wake, delete);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
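
/*
 * wait for a wakeup on this extent state's waitqueue, dropping the tree
 * lock while we sleep and retaking it before returning
 */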
static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock(&tree->lock);
			cond_resched();
			spin_lock(&tree->lock);
		}
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}
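
/*
 * set some bits on an extent state struct, updating the dirty byte
 * accounting and calling the set_bit hook
 */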
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			  int bits, int exclusive, u64 *failed_start,
			  gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY,
			      0, NULL, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}

static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
			    gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}

static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
				gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}

static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
				  u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}

/*
 * either insert or lock the state struct between start and end; use mask
 * to tell us whether waiting is desired.
 */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
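
/*
 * non-blocking flavour of lock_extent: returns 1 if the lock was taken,
 * or 0 (after undoing any partial lock) if part of the range was already
 * locked
 */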
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, mask);
		return 0;
	}
	return 1;
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}
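
/*
 * unlock all the pages covering [start, end], except for locked_page,
 * which the caller keeps locked
 */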
static noinline int __unlock_for_delalloc(struct inode *inode,
					  struct page *locked_page,
					  u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}
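
/*
 * lock all the pages covering [delalloc_start, delalloc_end].  Returns
 * -EAGAIN (after unlocking everything locked so far) if a page has gone
 * missing or is no longer dirty, which means the range must be found
 * again.
 */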
static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			       PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);

	/* step three, lock the state bits for the whole range */
	lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1);
	if (!ret) {
		unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
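
/*
 * helper for the writeback paths: clear the requested bits on a range in
 * the tree and apply the matching page level operations (clear dirty,
 * start or end writeback, unlock) to every page in the range except
 * locked_page
 */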
int extent_clear_unlock_delalloc(struct inode *inode,
				 struct extent_io_tree *tree,
				 u64 start, u64 end, struct page *locked_page,
				 int unlock_pages,
				 int clear_unlock,
				 int clear_delalloc, int clear_dirty,
				 int set_writeback,
				 int end_writeback)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (clear_unlock)
		clear_bits |= EXTENT_LOCKED;
	if (clear_dirty)
		clear_bits |= EXTENT_DIRTY;

	if (clear_delalloc)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (clear_dirty)
				clear_page_dirty_for_io(pages[i]);
			if (set_writeback)
				set_page_writeback(pages[i]);
			if (end_writeback)
				end_page_writeback(pages[i]);
			if (unlock_pages)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}
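
/*
 * read back the private field for the extent_state that starts at
 * 'start'.  Returns -ENOENT if no extent_state begins exactly there.
 */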
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (tree->ops && tree->ops->writepage_end_io_hook) {
			ret = tree->ops->writepage_end_io_hook(page, start,
						       end, NULL, uptodate);
			if (ret)
				uptodate = 0;
		}

		if (!uptodate && tree->ops &&
		    tree->ops->writepage_io_failed_hook) {
			ret = tree->ops->writepage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate = (err == 0);
				continue;
			}
		}

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (err)
		uptodate = 0;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      NULL);
			if (ret)
				uptodate = 0;
		}
		if (!uptodate && tree->ops &&
		    tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				if (err)
					uptodate = 0;
				continue;
			}
		}

		if (uptodate) {
			set_extent_uptodate(tree, start, end,
					    GFP_ATOMIC);
		}
		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static void end_bio_extent_preparewrite(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}
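
/*
 * allocate a bio for IO on 'bdev' starting at 'first_sector'.  If the
 * caller is in a memalloc context, retry with progressively fewer vecs
 * before giving up.
 */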
static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}
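
/*
 * send a fully built bio down to the block layer, giving the tree's
 * submit_bio_hook a chance to route it first
 */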
static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
			  unsigned long bio_flags)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
	end = start + bvec->bv_len - 1;

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num, bio_flags);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}
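
/*
 * add a page to the bio held in *bio_ret if it is contiguous and the
 * merge hook allows it; otherwise submit the old bio and start a new
 * one.  The new bio is left open in *bio_ret so later pages can be
 * appended to it.
 */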
static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num,
			      unsigned long prev_bio_flags,
			      unsigned long bio_flags)
{
	int ret = 0;
	struct bio *bio;
	int nr;
	int contig = 0;
	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (old_compressed)
			contig = bio->bi_sector == sector;
		else
			contig = bio->bi_sector + (bio->bi_size >> 9) ==
				sector;

		if (prev_bio_flags != bio_flags || !contig ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
					       bio_flags)) ||
		    bio_add_page(bio, page, page_size, offset) < page_size) {
			ret = submit_one_bio(rw, bio, mirror_num,
					     prev_bio_flags);
			bio = NULL;
		} else {
			return 0;
		}
	}
	if (this_compressed)
		nr = BIO_MAX_PAGES;
	else
		nr = bio_get_nr_vecs(bdev);

	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);

	bio_add_page(bio, page, page_size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret)
		*bio_ret = bio;
	else
		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);

	return ret;
}
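
/*
 * tag a page as backed by the extent map code, taking an extra page
 * reference the first time
 */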
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}

static void set_page_extent_head(struct page *page, unsigned long len)
{
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_io_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio, int mirror_num,
				   unsigned long *bio_flags)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t disk_io_size;
	size_t blocksize = inode->i_sb->s_blocksize;
	unsigned long this_bio_flag = 0;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
		char *userpage;
		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);

		if (zero_offset) {
			iosize = PAGE_CACHE_SIZE - zero_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + zero_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
		}
	}
	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur,
				end - cur + 1, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			this_bio_flag = EXTENT_BIO_COMPRESSED;

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
			disk_io_size = em->block_len;
			sector = em->block_start >> 9;
		} else {
			sector = (em->block_start + extent_offset) >> 9;
			disk_io_size = iosize;
		}
		bdev = em->bdev;
		block_start = em->block_start;
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			block_start = EXTENT_MAP_HOLE;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			check_page_uptodate(tree, page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* we have an inline extent but it didn't get marked up
		 * to date.  Error out
		 */
		if (block_start == EXTENT_MAP_INLINE) {
			SetPageError(page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			pnr -= page->index;
			ret = submit_extent_page(READ, tree, page,
					 sector, disk_io_size, page_offset,
					 bdev, bio, pnr,
					 end_bio_extent_readpage, mirror_num,
					 *bio_flags,
					 this_bio_flag);
			nr++;
			*bio_flags = this_bio_flag;
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
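
/*
 * read a single page, submitting whatever bio __extent_read_full_page
 * left open
 */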
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
				      &bio_flags);
	if (bio)
		submit_one_bio(READ, bio, 0, bio_flags);
	return ret;
}
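
/*
 * update the wbc accounting and, for normal writeback, the index that
 * the next writeback pass should start from
 */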
static noinline void update_nr_written(struct page *page,
				       struct writeback_control *wbc,
				       unsigned long nr_written)
{
	wbc->nr_to_write -= nr_written;
	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
		page->mapping->writeback_index = page->index + nr_written;
}
2059 * the writepage semantics are similar to regular writepage. extent
2060 * records are inserted to lock ranges in the tree, and as dirty areas
2061 * are found, they are marked writeback. Then the lock bits are removed
2062 * and the end_io handler clears the writeback ranges
2064 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2065 void *data)
2067 struct inode *inode = page->mapping->host;
2068 struct extent_page_data *epd = data;
2069 struct extent_io_tree *tree = epd->tree;
2070 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2071 u64 delalloc_start;
2072 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2073 u64 end;
2074 u64 cur = start;
2075 u64 extent_offset;
2076 u64 last_byte = i_size_read(inode);
2077 u64 block_start;
2078 u64 iosize;
2079 u64 unlock_start;
2080 sector_t sector;
2081 struct extent_map *em;
2082 struct block_device *bdev;
2083 int ret;
2084 int nr = 0;
2085 size_t pg_offset = 0;
2086 size_t blocksize;
2087 loff_t i_size = i_size_read(inode);
2088 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2089 u64 nr_delalloc;
2090 u64 delalloc_end;
2091 int page_started;
2092 int compressed;
2093 int write_flags;
2094 unsigned long nr_written = 0;
2096 if (wbc->sync_mode == WB_SYNC_ALL)
2097 write_flags = WRITE_SYNC_PLUG;
2098 else
2099 write_flags = WRITE;
2101 WARN_ON(!PageLocked(page));
2102 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2103 if (page->index > end_index ||
2104 (page->index == end_index && !pg_offset)) {
2105 page->mapping->a_ops->invalidatepage(page, 0);
2106 unlock_page(page);
2107 return 0;
2110 if (page->index == end_index) {
2111 char *userpage;
2113 userpage = kmap_atomic(page, KM_USER0);
2114 memset(userpage + pg_offset, 0,
2115 PAGE_CACHE_SIZE - pg_offset);
2116 kunmap_atomic(userpage, KM_USER0);
2117 flush_dcache_page(page);
2119 pg_offset = 0;
2121 set_page_extent_mapped(page);
2123 delalloc_start = start;
2124 delalloc_end = 0;
2125 page_started = 0;
2126 if (!epd->extent_locked) {
2128 * make sure the wbc mapping index is at least updated
2129 * to this page.
2131 update_nr_written(page, wbc, 0);
2133 while (delalloc_end < page_end) {
2134 nr_delalloc = find_lock_delalloc_range(inode, tree,
2135 page,
2136 &delalloc_start,
2137 &delalloc_end,
2138 128 * 1024 * 1024);
2139 if (nr_delalloc == 0) {
2140 delalloc_start = delalloc_end + 1;
2141 continue;
2143 tree->ops->fill_delalloc(inode, page, delalloc_start,
2144 delalloc_end, &page_started,
2145 &nr_written);
2146 delalloc_start = delalloc_end + 1;
2149 /* did the fill delalloc function already unlock and start
2150 * the IO?
2152 if (page_started) {
2153 ret = 0;
2155 * we've unlocked the page, so we can't update
2156 * the mapping's writeback index, just update
2157 * nr_to_write.
2159 wbc->nr_to_write -= nr_written;
2160 goto done_unlocked;
2163 lock_extent(tree, start, page_end, GFP_NOFS);
2165 unlock_start = start;
2167 if (tree->ops && tree->ops->writepage_start_hook) {
2168 ret = tree->ops->writepage_start_hook(page, start,
2169 page_end);
2170 if (ret == -EAGAIN) {
2171 unlock_extent(tree, start, page_end, GFP_NOFS);
2172 redirty_page_for_writepage(wbc, page);
2173 update_nr_written(page, wbc, nr_written);
2174 unlock_page(page);
2175 ret = 0;
2176 goto done_unlocked;
2181 * we don't want to touch the inode after unlocking the page,
2182 * so we update the mapping writeback index now
2184 update_nr_written(page, wbc, nr_written + 1);
2186 end = page_end;
2187 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
2188 printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
2190 if (last_byte <= start) {
2191 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2192 unlock_extent(tree, start, page_end, GFP_NOFS);
2193 if (tree->ops && tree->ops->writepage_end_io_hook)
2194 tree->ops->writepage_end_io_hook(page, start,
2195 page_end, NULL, 1);
2196 unlock_start = page_end + 1;
2197 goto done;
2200 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2201 blocksize = inode->i_sb->s_blocksize;
2203 while (cur <= end) {
2204 if (cur >= last_byte) {
2205 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2206 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2207 if (tree->ops && tree->ops->writepage_end_io_hook)
2208 tree->ops->writepage_end_io_hook(page, cur,
2209 page_end, NULL, 1);
2210 unlock_start = page_end + 1;
2211 break;
2212 }
2213 em = epd->get_extent(inode, page, pg_offset, cur,
2214 end - cur + 1, 1);
2215 if (IS_ERR(em) || !em) {
2216 SetPageError(page);
2217 break;
2218 }
2220 extent_offset = cur - em->start;
2221 BUG_ON(extent_map_end(em) <= cur);
2222 BUG_ON(end < cur);
2223 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2224 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2225 sector = (em->block_start + extent_offset) >> 9;
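/*
 * Worked example (added): with blocksize = 4096, a 5000-byte slice rounds
 * up to iosize = 8192 above, and the >> 9 converts a byte address into a
 * 512-byte sector number for the block layer, e.g. disk byte 1048576
 * becomes sector 2048.
 */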
2226 bdev = em->bdev;
2227 block_start = em->block_start;
2228 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2229 free_extent_map(em);
2230 em = NULL;
2232 /*
2233 * compressed and inline extents are written through other
2234 * paths in the FS
2235 */
2236 if (compressed || block_start == EXTENT_MAP_HOLE ||
2237 block_start == EXTENT_MAP_INLINE) {
2238 clear_extent_dirty(tree, cur,
2239 cur + iosize - 1, GFP_NOFS);
2241 unlock_extent(tree, unlock_start, cur + iosize - 1,
2242 GFP_NOFS);
2244 /*
2245 * end_io notification does not happen here for
2246 * compressed extents
2247 */
2248 if (!compressed && tree->ops &&
2249 tree->ops->writepage_end_io_hook)
2250 tree->ops->writepage_end_io_hook(page, cur,
2251 cur + iosize - 1,
2252 NULL, 1);
2253 else if (compressed) {
2254 /* we don't want to end_page_writeback on
2255 * a compressed extent. this happens
2256 * elsewhere
2257 */
2258 nr++;
2259 }
2261 cur += iosize;
2262 pg_offset += iosize;
2263 unlock_start = cur;
2264 continue;
2265 }
2266 /* leave this out until we have a page_mkwrite call */
2267 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2268 EXTENT_DIRTY, 0)) {
2269 cur = cur + iosize;
2270 pg_offset += iosize;
2271 continue;
2272 }
2274 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2275 if (tree->ops && tree->ops->writepage_io_hook) {
2276 ret = tree->ops->writepage_io_hook(page, cur,
2277 cur + iosize - 1);
2278 } else {
2279 ret = 0;
2280 }
2281 if (ret) {
2282 SetPageError(page);
2283 } else {
2284 unsigned long max_nr = end_index + 1;
2286 set_range_writeback(tree, cur, cur + iosize - 1);
2287 if (!PageWriteback(page)) {
2288 printk(KERN_ERR "btrfs warning page %lu not "
2289 "writeback, cur %llu end %llu\n",
2290 page->index, (unsigned long long)cur,
2291 (unsigned long long)end);
2292 }
2294 ret = submit_extent_page(write_flags, tree, page,
2295 sector, iosize, pg_offset,
2296 bdev, &epd->bio, max_nr,
2297 end_bio_extent_writepage,
2298 0, 0, 0);
2299 if (ret)
2300 SetPageError(page);
2301 }
2302 cur = cur + iosize;
2303 pg_offset += iosize;
2304 nr++;
2305 }
2306 done:
2307 if (nr == 0) {
2308 /* make sure the mapping tag for page dirty gets cleared */
2309 set_page_writeback(page);
2310 end_page_writeback(page);
2311 }
2312 if (unlock_start <= page_end)
2313 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2314 unlock_page(page);
2316 done_unlocked:
2318 return 0;
2319 }
2321 /**
2322 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2323 * @mapping: address space structure to write
2324 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2325 * @writepage: function called for each page
2326 * @data: data passed to writepage function
2327 *
2328 * If a page is already under I/O, write_cache_pages() skips it, even
2329 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2330 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2331 * and msync() need to guarantee that all the data which was dirty at the time
2332 * the call was made get new I/O started against them. If wbc->sync_mode is
2333 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2334 * existing IO to complete.
2335 */
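/*
 * Added sketch (hypothetical caller, mirroring extent_writepages() further
 * down): a data-integrity flush would reach this loop roughly as
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_ALL,
 *		.range_start = 0,
 *		.range_end = LLONG_MAX,
 *		.nr_to_write = LONG_MAX,
 *	};
 *	extent_write_cache_pages(tree, mapping, &wbc,
 *				 __extent_writepage, &epd, flush_write_bio);
 *
 * and relies on the wait_on_page_writeback() below before rewriting any
 * page that is still under I/O.
 */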
2336 static int extent_write_cache_pages(struct extent_io_tree *tree,
2337 struct address_space *mapping,
2338 struct writeback_control *wbc,
2339 writepage_t writepage, void *data,
2340 void (*flush_fn)(void *))
2341 {
2342 struct backing_dev_info *bdi = mapping->backing_dev_info;
2343 int ret = 0;
2344 int done = 0;
2345 struct pagevec pvec;
2346 int nr_pages;
2347 pgoff_t index;
2348 pgoff_t end; /* Inclusive */
2349 int scanned = 0;
2350 int range_whole = 0;
2352 pagevec_init(&pvec, 0);
2353 if (wbc->range_cyclic) {
2354 index = mapping->writeback_index; /* Start from prev offset */
2355 end = -1;
2356 } else {
2357 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2358 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2359 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2360 range_whole = 1;
2361 scanned = 1;
2362 }
2363 retry:
2364 while (!done && (index <= end) &&
2365 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2366 PAGECACHE_TAG_DIRTY, min(end - index,
2367 (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2368 unsigned i;
2370 scanned = 1;
2371 for (i = 0; i < nr_pages; i++) {
2372 struct page *page = pvec.pages[i];
2374 /*
2375 * At this point we hold neither mapping->tree_lock nor
2376 * lock on the page itself: the page may be truncated or
2377 * invalidated (changing page->mapping to NULL), or even
2378 * swizzled back from swapper_space to tmpfs file
2379 * mapping
2380 */
2381 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2382 tree->ops->write_cache_pages_lock_hook(page);
2383 else
2384 lock_page(page);
2386 if (unlikely(page->mapping != mapping)) {
2387 unlock_page(page);
2388 continue;
2389 }
2391 if (!wbc->range_cyclic && page->index > end) {
2392 done = 1;
2393 unlock_page(page);
2394 continue;
2395 }
2397 if (wbc->sync_mode != WB_SYNC_NONE) {
2398 if (PageWriteback(page))
2399 flush_fn(data);
2400 wait_on_page_writeback(page);
2401 }
2403 if (PageWriteback(page) ||
2404 !clear_page_dirty_for_io(page)) {
2405 unlock_page(page);
2406 continue;
2407 }
2409 ret = (*writepage)(page, wbc, data);
2411 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2412 unlock_page(page);
2413 ret = 0;
2414 }
2415 if (ret || wbc->nr_to_write <= 0)
2416 done = 1;
2417 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2418 wbc->encountered_congestion = 1;
2419 done = 1;
2420 }
2421 }
2422 pagevec_release(&pvec);
2423 cond_resched();
2424 }
2425 if (!scanned && !done) {
2426 /*
2427 * We hit the last page and there is more work to be done: wrap
2428 * back to the start of the file
2429 */
2430 scanned = 1;
2431 index = 0;
2432 goto retry;
2433 }
2434 return ret;
2435 }
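/*
 * Added note on range_cyclic: writeback resumes from
 * mapping->writeback_index, so for a 150-page file with writeback_index =
 * 100 the first pass scans pages 100..149; if it hits the end with work
 * left (!scanned && !done), index wraps to 0 and pages 0..99 get one more
 * pass, giving every dirty page a single chance per call.
 */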
2437 static void flush_epd_write_bio(struct extent_page_data *epd)
2438 {
2439 if (epd->bio) {
2440 if (epd->sync_io)
2441 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2442 else
2443 submit_one_bio(WRITE, epd->bio, 0, 0);
2444 epd->bio = NULL;
2445 }
2446 }
2448 static noinline void flush_write_bio(void *data)
2449 {
2450 struct extent_page_data *epd = data;
2451 flush_epd_write_bio(epd);
2452 }
2454 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2455 get_extent_t *get_extent,
2456 struct writeback_control *wbc)
2457 {
2458 int ret;
2459 struct address_space *mapping = page->mapping;
2460 struct extent_page_data epd = {
2461 .bio = NULL,
2462 .tree = tree,
2463 .get_extent = get_extent,
2464 .extent_locked = 0,
2465 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2466 };
2467 struct writeback_control wbc_writepages = {
2468 .bdi = wbc->bdi,
2469 .sync_mode = wbc->sync_mode,
2470 .older_than_this = NULL,
2471 .nr_to_write = 64,
2472 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2473 .range_end = (loff_t)-1,
2474 };
2476 ret = __extent_writepage(page, wbc, &epd);
2478 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2479 __extent_writepage, &epd, flush_write_bio);
2480 flush_epd_write_bio(&epd);
2481 return ret;
2482 }
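/*
 * Typical caller (added sketch; assumes the btrfs inode.c glue of this
 * era): the .writepage address_space op wraps this function roughly as
 *
 *	static int btrfs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_write_full_page(tree, page,
 *					      btrfs_get_extent, wbc);
 *	}
 *
 * The wbc_writepages pass above then opportunistically writes up to 64
 * more dirty pages following the target page in the same mapping.
 */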
2484 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2485 u64 start, u64 end, get_extent_t *get_extent,
2486 int mode)
2487 {
2488 int ret = 0;
2489 struct address_space *mapping = inode->i_mapping;
2490 struct page *page;
2491 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2492 PAGE_CACHE_SHIFT;
2494 struct extent_page_data epd = {
2495 .bio = NULL,
2496 .tree = tree,
2497 .get_extent = get_extent,
2498 .extent_locked = 1,
2499 .sync_io = mode == WB_SYNC_ALL,
2500 };
2501 struct writeback_control wbc_writepages = {
2502 .bdi = inode->i_mapping->backing_dev_info,
2503 .sync_mode = mode,
2504 .older_than_this = NULL,
2505 .nr_to_write = nr_pages * 2,
2506 .range_start = start,
2507 .range_end = end + 1,
2508 };
2510 while (start <= end) {
2511 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2512 if (clear_page_dirty_for_io(page))
2513 ret = __extent_writepage(page, &wbc_writepages, &epd);
2514 else {
2515 if (tree->ops && tree->ops->writepage_end_io_hook)
2516 tree->ops->writepage_end_io_hook(page, start,
2517 start + PAGE_CACHE_SIZE - 1,
2518 NULL, 1);
2519 unlock_page(page);
2520 }
2521 page_cache_release(page);
2522 start += PAGE_CACHE_SIZE;
2523 }
2525 flush_epd_write_bio(&epd);
2526 return ret;
2527 }
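/*
 * Added note: nr_to_write is sized at nr_pages * 2 as slack, so the
 * per-page accounting done inside __extent_writepage() cannot drive it to
 * zero and stop submission before the whole locked range [start, end] has
 * been written.
 */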
2529 int extent_writepages(struct extent_io_tree *tree,
2530 struct address_space *mapping,
2531 get_extent_t *get_extent,
2532 struct writeback_control *wbc)
2533 {
2534 int ret = 0;
2535 struct extent_page_data epd = {
2536 .bio = NULL,
2537 .tree = tree,
2538 .get_extent = get_extent,
2539 .extent_locked = 0,
2540 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2541 };
2543 ret = extent_write_cache_pages(tree, mapping, wbc,
2544 __extent_writepage, &epd,
2545 flush_write_bio);
2546 flush_epd_write_bio(&epd);
2547 return ret;
2548 }
2550 int extent_readpages(struct extent_io_tree *tree,
2551 struct address_space *mapping,
2552 struct list_head *pages, unsigned nr_pages,
2553 get_extent_t get_extent)
2554 {
2555 struct bio *bio = NULL;
2556 unsigned page_idx;
2557 struct pagevec pvec;
2558 unsigned long bio_flags = 0;
2560 pagevec_init(&pvec, 0);
2561 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2562 struct page *page = list_entry(pages->prev, struct page, lru);
2564 prefetchw(&page->flags);
2565 list_del(&page->lru);
2566 /*
2567 * what we want to do here is call add_to_page_cache_lru,
2568 * but that isn't exported, so we reproduce it here
2569 */
2570 if (!add_to_page_cache(page, mapping,
2571 page->index, GFP_KERNEL)) {
2573 /* open coding of lru_cache_add, also not exported */
2574 page_cache_get(page);
2575 if (!pagevec_add(&pvec, page))
2576 __pagevec_lru_add_file(&pvec);
2577 __extent_read_full_page(tree, page, get_extent,
2578 &bio, 0, &bio_flags);
2579 }
2580 page_cache_release(page);
2581 }
2582 if (pagevec_count(&pvec))
2583 __pagevec_lru_add_file(&pvec);
2584 BUG_ON(!list_empty(pages));
2585 if (bio)
2586 submit_one_bio(READ, bio, 0, bio_flags);
2587 return 0;
2588 }
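/*
 * Added note: this is the .readpages address_space op; read-ahead passes
 * the pages list in reverse file order, which is why each iteration peels
 * pages->prev to read ascending offsets, and all the per-page reads are
 * chained into one bio submitted once at the end.
 */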
2590 /*
2591 * basic invalidatepage code, this waits on any locked or writeback
2592 * ranges corresponding to the page, and then deletes any extent state
2593 * records from the tree
2594 */
2595 int extent_invalidatepage(struct extent_io_tree *tree,
2596 struct page *page, unsigned long offset)
2597 {
2598 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2599 u64 end = start + PAGE_CACHE_SIZE - 1;
2600 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2602 start += (offset + blocksize - 1) & ~(blocksize - 1);
2603 if (start > end)
2604 return 0;
2606 lock_extent(tree, start, end, GFP_NOFS);
2607 wait_on_extent_writeback(tree, start, end);
2608 clear_extent_bit(tree, start, end,
2609 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2610 1, 1, GFP_NOFS);
2611 return 0;
2612 }
2614 /*
2615 * simple commit_write call, set_range_dirty is used to mark both
2616 * the pages and the extent records as dirty
2617 */
2618 int extent_commit_write(struct extent_io_tree *tree,
2619 struct inode *inode, struct page *page,
2620 unsigned from, unsigned to)
2621 {
2622 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2624 set_page_extent_mapped(page);
2625 set_page_dirty(page);
2627 if (pos > inode->i_size) {
2628 i_size_write(inode, pos);
2629 mark_inode_dirty(inode);
2630 }
2631 return 0;
2632 }
2634 int extent_prepare_write(struct extent_io_tree *tree,
2635 struct inode *inode, struct page *page,
2636 unsigned from, unsigned to, get_extent_t *get_extent)
2637 {
2638 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2639 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2640 u64 block_start;
2641 u64 orig_block_start;
2642 u64 block_end;
2643 u64 cur_end;
2644 struct extent_map *em;
2645 unsigned blocksize = 1 << inode->i_blkbits;
2646 size_t page_offset = 0;
2647 size_t block_off_start;
2648 size_t block_off_end;
2649 int err = 0;
2650 int iocount = 0;
2651 int ret = 0;
2652 int isnew;
2654 set_page_extent_mapped(page);
2656 block_start = (page_start + from) & ~((u64)blocksize - 1);
2657 block_end = (page_start + to - 1) | (blocksize - 1);
2658 orig_block_start = block_start;
2660 lock_extent(tree, page_start, page_end, GFP_NOFS);
2661 while (block_start <= block_end) {
2662 em = get_extent(inode, page, page_offset, block_start,
2663 block_end - block_start + 1, 1);
2664 if (IS_ERR(em) || !em)
2665 goto err;
2667 cur_end = min(block_end, extent_map_end(em) - 1);
2668 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2669 block_off_end = block_off_start + blocksize;
2670 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2672 if (!PageUptodate(page) && isnew &&
2673 (block_off_end > to || block_off_start < from)) {
2674 void *kaddr;
2676 kaddr = kmap_atomic(page, KM_USER0);
2677 if (block_off_end > to)
2678 memset(kaddr + to, 0, block_off_end - to);
2679 if (block_off_start < from)
2680 memset(kaddr + block_off_start, 0,
2681 from - block_off_start);
2682 flush_dcache_page(page);
2683 kunmap_atomic(kaddr, KM_USER0);
2684 }
2685 if ((em->block_start != EXTENT_MAP_HOLE &&
2686 em->block_start != EXTENT_MAP_INLINE) &&
2687 !isnew && !PageUptodate(page) &&
2688 (block_off_end > to || block_off_start < from) &&
2689 !test_range_bit(tree, block_start, cur_end,
2690 EXTENT_UPTODATE, 1)) {
2691 u64 sector;
2692 u64 extent_offset = block_start - em->start;
2693 size_t iosize;
2694 sector = (em->block_start + extent_offset) >> 9;
2695 iosize = (cur_end - block_start + blocksize) &
2696 ~((u64)blocksize - 1);
2697 /*
2698 * we've already got the extent locked, but we
2699 * need to split the state such that our end_bio
2700 * handler can clear the lock.
2701 */
2702 set_extent_bit(tree, block_start,
2703 block_start + iosize - 1,
2704 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2705 ret = submit_extent_page(READ, tree, page,
2706 sector, iosize, page_offset, em->bdev,
2707 NULL, 1,
2708 end_bio_extent_preparewrite, 0,
2709 0, 0);
2710 iocount++;
2711 block_start = block_start + iosize;
2712 } else {
2713 set_extent_uptodate(tree, block_start, cur_end,
2714 GFP_NOFS);
2715 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2716 block_start = cur_end + 1;
2718 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2719 free_extent_map(em);
2720 }
2721 if (iocount) {
2722 wait_extent_bit(tree, orig_block_start,
2723 block_end, EXTENT_LOCKED);
2724 }
2725 check_page_uptodate(tree, page);
2726 err:
2727 /* FIXME, zero out newly allocated blocks on error */
2728 return err;
2729 }
2731 /*
2732 * a helper for releasepage, this tests for areas of the page that
2733 * are locked or under IO and drops the related state bits if it is safe
2734 * to drop the page.
2735 */
2736 int try_release_extent_state(struct extent_map_tree *map,
2737 struct extent_io_tree *tree, struct page *page,
2738 gfp_t mask)
2739 {
2740 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2741 u64 end = start + PAGE_CACHE_SIZE - 1;
2742 int ret = 1;
2744 if (test_range_bit(tree, start, end,
2745 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2746 ret = 0;
2747 else {
2748 if ((mask & GFP_NOFS) == GFP_NOFS)
2749 mask = GFP_NOFS;
2750 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2751 1, 1, mask);
2752 }
2753 return ret;
2754 }
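/*
 * Added note: the mask clamp above reduces e.g. (GFP_NOFS |
 * __GFP_HIGHMEM) to plain GFP_NOFS, so the extent-state work done by
 * clear_extent_bit() allocates safely from a releasepage path that may be
 * running inside filesystem reclaim.
 */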
2756 /*
2757 * a helper for releasepage. As long as there are no locked extents
2758 * in the range corresponding to the page, both state records and extent
2759 * map records are removed
2760 */
2761 int try_release_extent_mapping(struct extent_map_tree *map,
2762 struct extent_io_tree *tree, struct page *page,
2763 gfp_t mask)
2764 {
2765 struct extent_map *em;
2766 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2767 u64 end = start + PAGE_CACHE_SIZE - 1;
2769 if ((mask & __GFP_WAIT) &&
2770 page->mapping->host->i_size > 16 * 1024 * 1024) {
2771 u64 len;
2772 while (start <= end) {
2773 len = end - start + 1;
2774 spin_lock(&map->lock);
2775 em = lookup_extent_mapping(map, start, len);
2776 if (!em || IS_ERR(em)) {
2777 spin_unlock(&map->lock);
2778 break;
2779 }
2780 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2781 em->start != start) {
2782 spin_unlock(&map->lock);
2783 free_extent_map(em);
2784 break;
2785 }
2786 if (!test_range_bit(tree, em->start,
2787 extent_map_end(em) - 1,
2788 EXTENT_LOCKED | EXTENT_WRITEBACK |
2789 EXTENT_ORDERED,
2790 0)) {
2791 remove_extent_mapping(map, em);
2792 /* once for the rb tree */
2793 free_extent_map(em);
2794 }
2795 start = extent_map_end(em);
2796 spin_unlock(&map->lock);
2798 /* once for us */
2799 free_extent_map(em);
2800 }
2801 }
2802 return try_release_extent_state(map, tree, page, mask);
2803 }
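/*
 * Added note: the 16MB i_size cutoff above keeps cached extent mappings
 * for small files across page release (cheap to hold, likely reused),
 * while larger files give their mappings back one page-sized lookup at a
 * time as pages are released.
 */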
2805 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2806 get_extent_t *get_extent)
2807 {
2808 struct inode *inode = mapping->host;
2809 u64 start = iblock << inode->i_blkbits;
2810 sector_t sector = 0;
2811 size_t blksize = (1 << inode->i_blkbits);
2812 struct extent_map *em;
2814 lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2815 GFP_NOFS);
2816 em = get_extent(inode, NULL, 0, start, blksize, 0);
2817 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2818 GFP_NOFS);
2819 if (!em || IS_ERR(em))
2820 return 0;
2822 if (em->block_start > EXTENT_MAP_LAST_BYTE)
2823 goto out;
2825 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2826 out:
2827 free_extent_map(em);
2828 return sector;
2829 }
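/*
 * Worked example (added): for a file whose first 1MB is contiguous at disk
 * byte 8388608 (8MB) with 4K blocks, FIBMAP for iblock 3 computes start =
 * 12288, the lookup above yields em->start = 0 and em->block_start =
 * 8388608, and the function returns (8388608 + 12288 - 0) >> 12 = 2051,
 * in filesystem-block units.
 */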
2831 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2832 __u64 start, __u64 len, get_extent_t *get_extent)
2833 {
2834 int ret;
2835 u64 off = start;
2836 u64 max = start + len;
2837 u32 flags = 0;
2838 u64 disko = 0;
2839 struct extent_map *em = NULL;
2840 int end = 0;
2841 u64 em_start = 0, em_len = 0;
2842 unsigned long emflags;
2843 ret = 0;
2845 if (len == 0)
2846 return -EINVAL;
2848 lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2849 GFP_NOFS);
2850 em = get_extent(inode, NULL, 0, off, max - off, 0);
2851 if (!em)
2852 goto out;
2853 if (IS_ERR(em)) {
2854 ret = PTR_ERR(em);
2855 goto out;
2856 }
2857 while (!end) {
2858 off = em->start + em->len;
2859 if (off >= max)
2860 end = 1;
2862 em_start = em->start;
2863 em_len = em->len;
2865 disko = 0;
2866 flags = 0;
2868 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2869 end = 1;
2870 flags |= FIEMAP_EXTENT_LAST;
2871 } else if (em->block_start == EXTENT_MAP_HOLE) {
2872 flags |= FIEMAP_EXTENT_UNWRITTEN;
2873 } else if (em->block_start == EXTENT_MAP_INLINE) {
2874 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2875 FIEMAP_EXTENT_NOT_ALIGNED);
2876 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
2877 flags |= (FIEMAP_EXTENT_DELALLOC |
2878 FIEMAP_EXTENT_UNKNOWN);
2879 } else {
2880 disko = em->block_start;
2881 }
2882 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2883 flags |= FIEMAP_EXTENT_ENCODED;
2885 emflags = em->flags;
2886 free_extent_map(em);
2887 em = NULL;
2889 if (!end) {
2890 em = get_extent(inode, NULL, 0, off, max - off, 0);
2891 if (!em)
2892 goto out;
2893 if (IS_ERR(em)) {
2894 ret = PTR_ERR(em);
2895 goto out;
2896 }
2897 emflags = em->flags;
2898 }
2899 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
2900 flags |= FIEMAP_EXTENT_LAST;
2901 end = 1;
2902 }
2904 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2905 em_len, flags);
2906 if (ret)
2907 goto out_free;
2908 }
2909 out_free:
2910 free_extent_map(em);
2911 out:
2912 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2913 GFP_NOFS);
2914 return ret;
2915 }
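/*
 * Added sketch of a (hypothetical) userspace consumer, for orientation:
 *
 *	struct fiemap *fm;		 // FS_IOC_FIEMAP ioctl payload
 *	fm->fm_start = 0;
 *	fm->fm_length = ~0ULL;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * Each extent_map walked above becomes one fiemap_extent; note that btrfs
 * compressed extents are reported as FIEMAP_EXTENT_ENCODED.
 */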
2917 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2918 unsigned long i)
2919 {
2920 struct page *p;
2921 struct address_space *mapping;
2923 if (i == 0)
2924 return eb->first_page;
2925 i += eb->start >> PAGE_CACHE_SHIFT;
2926 mapping = eb->first_page->mapping;
2927 if (!mapping)
2928 return NULL;
2930 /*
2931 * extent_buffer_page is only called after pinning the page
2932 * by increasing the reference count. So we know the page must
2933 * be in the radix tree.
2934 */
2935 rcu_read_lock();
2936 p = radix_tree_lookup(&mapping->page_tree, i);
2937 rcu_read_unlock();
2939 return p;
2940 }
2942 static inline unsigned long num_extent_pages(u64 start, u64 len)
2943 {
2944 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2945 (start >> PAGE_CACHE_SHIFT);
2946 }
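/*
 * Worked example (added): start = 6144, len = 8192 with 4K pages spans
 * bytes 6144..14335, i.e. pages 1..3; the formula gives
 * ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) = 4 - 1 = 3 pages.
 */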
2948 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2949 u64 start,
2950 unsigned long len,
2951 gfp_t mask)
2952 {
2953 struct extent_buffer *eb = NULL;
2954 #if LEAK_DEBUG
2955 unsigned long flags;
2956 #endif
2958 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2959 eb->start = start;
2960 eb->len = len;
2961 spin_lock_init(&eb->lock);
2962 init_waitqueue_head(&eb->lock_wq);
2964 #if LEAK_DEBUG
2965 spin_lock_irqsave(&leak_lock, flags);
2966 list_add(&eb->leak_list, &buffers);
2967 spin_unlock_irqrestore(&leak_lock, flags);
2968 #endif
2969 atomic_set(&eb->refs, 1);
2971 return eb;
2972 }
2974 static void __free_extent_buffer(struct extent_buffer *eb)
2975 {
2976 #if LEAK_DEBUG
2977 unsigned long flags;
2978 spin_lock_irqsave(&leak_lock, flags);
2979 list_del(&eb->leak_list);
2980 spin_unlock_irqrestore(&leak_lock, flags);
2981 #endif
2982 kmem_cache_free(extent_buffer_cache, eb);
2983 }
2985 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2986 u64 start, unsigned long len,
2987 struct page *page0,
2988 gfp_t mask)
2989 {
2990 unsigned long num_pages = num_extent_pages(start, len);
2991 unsigned long i;
2992 unsigned long index = start >> PAGE_CACHE_SHIFT;
2993 struct extent_buffer *eb;
2994 struct extent_buffer *exists = NULL;
2995 struct page *p;
2996 struct address_space *mapping = tree->mapping;
2997 int uptodate = 1;
2999 spin_lock(&tree->buffer_lock);
3000 eb = buffer_search(tree, start);
3001 if (eb) {
3002 atomic_inc(&eb->refs);
3003 spin_unlock(&tree->buffer_lock);
3004 mark_page_accessed(eb->first_page);
3005 return eb;
3006 }
3007 spin_unlock(&tree->buffer_lock);
3009 eb = __alloc_extent_buffer(tree, start, len, mask);
3010 if (!eb)
3011 return NULL;
3013 if (page0) {
3014 eb->first_page = page0;
3015 i = 1;
3016 index++;
3017 page_cache_get(page0);
3018 mark_page_accessed(page0);
3019 set_page_extent_mapped(page0);
3020 set_page_extent_head(page0, len);
3021 uptodate = PageUptodate(page0);
3022 } else {
3023 i = 0;
3024 }
3025 for (; i < num_pages; i++, index++) {
3026 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
3027 if (!p) {
3028 WARN_ON(1);
3029 goto free_eb;
3030 }
3031 set_page_extent_mapped(p);
3032 mark_page_accessed(p);
3033 if (i == 0) {
3034 eb->first_page = p;
3035 set_page_extent_head(p, len);
3036 } else {
3037 set_page_private(p, EXTENT_PAGE_PRIVATE);
3038 }
3039 if (!PageUptodate(p))
3040 uptodate = 0;
3041 unlock_page(p);
3042 }
3043 if (uptodate)
3044 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3046 spin_lock(&tree->buffer_lock);
3047 exists = buffer_tree_insert(tree, start, &eb->rb_node);
3048 if (exists) {
3049 /* add one reference for the caller */
3050 atomic_inc(&exists->refs);
3051 spin_unlock(&tree->buffer_lock);
3052 goto free_eb;
3053 }
3054 spin_unlock(&tree->buffer_lock);
3056 /* add one reference for the tree */
3057 atomic_inc(&eb->refs);
3058 return eb;
3060 free_eb:
3061 if (!atomic_dec_and_test(&eb->refs))
3062 return exists;
3063 for (index = 1; index < i; index++)
3064 page_cache_release(extent_buffer_page(eb, index));
3065 page_cache_release(extent_buffer_page(eb, 0));
3066 __free_extent_buffer(eb);
3067 return exists;
3068 }
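/*
 * Added note: two tasks can both miss in buffer_search() above and
 * allocate; buffer_tree_insert() arbitrates, and the loser drops its pages
 * and eb via free_eb, returning the winner ("exists") with a reference
 * already taken for the caller.
 */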
3070 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3071 u64 start, unsigned long len,
3072 gfp_t mask)
3073 {
3074 struct extent_buffer *eb;
3076 spin_lock(&tree->buffer_lock);
3077 eb = buffer_search(tree, start);
3078 if (eb)
3079 atomic_inc(&eb->refs);
3080 spin_unlock(&tree->buffer_lock);
3082 if (eb)
3083 mark_page_accessed(eb->first_page);
3085 return eb;
3086 }
3088 void free_extent_buffer(struct extent_buffer *eb)
3089 {
3090 if (!eb)
3091 return;
3093 if (!atomic_dec_and_test(&eb->refs))
3094 return;
3096 WARN_ON(1);
3097 }
3099 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3100 struct extent_buffer *eb)
3101 {
3102 unsigned long i;
3103 unsigned long num_pages;
3104 struct page *page;
3106 num_pages = num_extent_pages(eb->start, eb->len);
3108 for (i = 0; i < num_pages; i++) {
3109 page = extent_buffer_page(eb, i);
3110 if (!PageDirty(page))
3111 continue;
3113 lock_page(page);
3114 if (i == 0)
3115 set_page_extent_head(page, eb->len);
3116 else
3117 set_page_private(page, EXTENT_PAGE_PRIVATE);
3119 clear_page_dirty_for_io(page);
3120 spin_lock_irq(&page->mapping->tree_lock);
3121 if (!PageDirty(page)) {
3122 radix_tree_tag_clear(&page->mapping->page_tree,
3123 page_index(page),
3124 PAGECACHE_TAG_DIRTY);
3125 }
3126 spin_unlock_irq(&page->mapping->tree_lock);
3127 unlock_page(page);
3128 }
3129 return 0;
3130 }
3132 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3133 struct extent_buffer *eb)
3134 {
3135 return wait_on_extent_writeback(tree, eb->start,
3136 eb->start + eb->len - 1);
3137 }
3139 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3140 struct extent_buffer *eb)
3141 {
3142 unsigned long i;
3143 unsigned long num_pages;
3144 int was_dirty = 0;
3146 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3147 num_pages = num_extent_pages(eb->start, eb->len);
3148 for (i = 0; i < num_pages; i++)
3149 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3150 return was_dirty;
3151 }
3153 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3154 struct extent_buffer *eb)
3155 {
3156 unsigned long i;
3157 struct page *page;
3158 unsigned long num_pages;
3160 num_pages = num_extent_pages(eb->start, eb->len);
3161 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3163 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3164 GFP_NOFS);
3165 for (i = 0; i < num_pages; i++) {
3166 page = extent_buffer_page(eb, i);
3167 if (page)
3168 ClearPageUptodate(page);
3169 }
3170 return 0;
3171 }
3173 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3174 struct extent_buffer *eb)
3175 {
3176 unsigned long i;
3177 struct page *page;
3178 unsigned long num_pages;
3180 num_pages = num_extent_pages(eb->start, eb->len);
3182 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3183 GFP_NOFS);
3184 for (i = 0; i < num_pages; i++) {
3185 page = extent_buffer_page(eb, i);
3186 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3187 ((i == num_pages - 1) &&
3188 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3189 check_page_uptodate(tree, page);
3190 continue;
3191 }
3192 SetPageUptodate(page);
3193 }
3194 return 0;
3195 }
3197 int extent_range_uptodate(struct extent_io_tree *tree,
3198 u64 start, u64 end)
3199 {
3200 struct page *page;
3201 int ret;
3202 int pg_uptodate = 1;
3203 int uptodate;
3204 unsigned long index;
3206 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3207 if (ret)
3208 return 1;
3209 while (start <= end) {
3210 index = start >> PAGE_CACHE_SHIFT;
3211 page = find_get_page(tree->mapping, index);
3212 uptodate = PageUptodate(page);
3213 page_cache_release(page);
3214 if (!uptodate) {
3215 pg_uptodate = 0;
3216 break;
3217 }
3218 start += PAGE_CACHE_SIZE;
3219 }
3220 return pg_uptodate;
3221 }
3223 int extent_buffer_uptodate(struct extent_io_tree *tree,
3224 struct extent_buffer *eb)
3225 {
3226 int ret = 0;
3227 unsigned long num_pages;
3228 unsigned long i;
3229 struct page *page;
3230 int pg_uptodate = 1;
3232 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3233 return 1;
3235 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3236 EXTENT_UPTODATE, 1);
3237 if (ret)
3238 return ret;
3240 num_pages = num_extent_pages(eb->start, eb->len);
3241 for (i = 0; i < num_pages; i++) {
3242 page = extent_buffer_page(eb, i);
3243 if (!PageUptodate(page)) {
3244 pg_uptodate = 0;
3245 break;
3246 }
3247 }
3248 return pg_uptodate;
3249 }
3251 int read_extent_buffer_pages(struct extent_io_tree *tree,
3252 struct extent_buffer *eb,
3253 u64 start, int wait,
3254 get_extent_t *get_extent, int mirror_num)
3255 {
3256 unsigned long i;
3257 unsigned long start_i;
3258 struct page *page;
3259 int err;
3260 int ret = 0;
3261 int locked_pages = 0;
3262 int all_uptodate = 1;
3263 int inc_all_pages = 0;
3264 unsigned long num_pages;
3265 struct bio *bio = NULL;
3266 unsigned long bio_flags = 0;
3268 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3269 return 0;
3271 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3272 EXTENT_UPTODATE, 1)) {
3273 return 0;
3274 }
3276 if (start) {
3277 WARN_ON(start < eb->start);
3278 start_i = (start >> PAGE_CACHE_SHIFT) -
3279 (eb->start >> PAGE_CACHE_SHIFT);
3280 } else {
3281 start_i = 0;
3282 }
3284 num_pages = num_extent_pages(eb->start, eb->len);
3285 for (i = start_i; i < num_pages; i++) {
3286 page = extent_buffer_page(eb, i);
3287 if (!wait) {
3288 if (!trylock_page(page))
3289 goto unlock_exit;
3290 } else {
3291 lock_page(page);
3292 }
3293 locked_pages++;
3294 if (!PageUptodate(page))
3295 all_uptodate = 0;
3296 }
3297 if (all_uptodate) {
3298 if (start_i == 0)
3299 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3300 goto unlock_exit;
3301 }
3303 for (i = start_i; i < num_pages; i++) {
3304 page = extent_buffer_page(eb, i);
3305 if (inc_all_pages)
3306 page_cache_get(page);
3307 if (!PageUptodate(page)) {
3308 if (start_i == 0)
3309 inc_all_pages = 1;
3310 ClearPageError(page);
3311 err = __extent_read_full_page(tree, page,
3312 get_extent, &bio,
3313 mirror_num, &bio_flags);
3314 if (err)
3315 ret = err;
3316 } else {
3317 unlock_page(page);
3318 }
3319 }
3321 if (bio)
3322 submit_one_bio(READ, bio, mirror_num, bio_flags);
3324 if (ret || !wait)
3325 return ret;
3327 for (i = start_i; i < num_pages; i++) {
3328 page = extent_buffer_page(eb, i);
3329 wait_on_page_locked(page);
3330 if (!PageUptodate(page))
3331 ret = -EIO;
3332 }
3334 if (!ret)
3335 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3336 return ret;
3338 unlock_exit:
3339 i = start_i;
3340 while (locked_pages > 0) {
3341 page = extent_buffer_page(eb, i);
3342 i++;
3343 unlock_page(page);
3344 locked_pages--;
3345 }
3346 return ret;
3347 }
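/*
 * Added note: the function makes two passes over the buffer's pages --
 * first locking them all and testing PageUptodate (a fully cached buffer
 * costs no I/O), then issuing reads only for stale pages; a !wait caller
 * bails out through unlock_exit as soon as trylock_page() fails.
 */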
3349 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3350 unsigned long start,
3351 unsigned long len)
3352 {
3353 size_t cur;
3354 size_t offset;
3355 struct page *page;
3356 char *kaddr;
3357 char *dst = (char *)dstv;
3358 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3359 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3361 WARN_ON(start > eb->len);
3362 WARN_ON(start + len > eb->start + eb->len);
3364 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3366 while (len > 0) {
3367 page = extent_buffer_page(eb, i);
3369 cur = min(len, (PAGE_CACHE_SIZE - offset));
3370 kaddr = kmap_atomic(page, KM_USER1);
3371 memcpy(dst, kaddr + offset, cur);
3372 kunmap_atomic(kaddr, KM_USER1);
3374 dst += cur;
3375 len -= cur;
3376 offset = 0;
3377 i++;
3378 }
3379 }
3381 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3382 unsigned long min_len, char **token, char **map,
3383 unsigned long *map_start,
3384 unsigned long *map_len, int km)
3385 {
3386 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3387 char *kaddr;
3388 struct page *p;
3389 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3390 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3391 unsigned long end_i = (start_offset + start + min_len - 1) >>
3392 PAGE_CACHE_SHIFT;
3394 if (i != end_i)
3395 return -EINVAL;
3397 if (i == 0) {
3398 offset = start_offset;
3399 *map_start = 0;
3400 } else {
3401 offset = 0;
3402 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3403 }
3405 if (start + min_len > eb->len) {
3406 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3407 "wanted %lu %lu\n", (unsigned long long)eb->start,
3408 eb->len, start, min_len);
3409 WARN_ON(1);
3410 }
3412 p = extent_buffer_page(eb, i);
3413 kaddr = kmap_atomic(p, km);
3414 *token = kaddr;
3415 *map = kaddr + offset;
3416 *map_len = PAGE_CACHE_SIZE - offset;
3417 return 0;
3418 }
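/*
 * Worked example (added): with eb->start page aligned (start_offset = 0),
 * a request for start = 4000, min_len = 200 fails the i != end_i check
 * (bytes 4000..4199 straddle two pages, -EINVAL), while min_len = 96 maps
 * the first page whole: *map_start = 0, *map_len = PAGE_CACHE_SIZE, and
 * the caller reads byte 4000 at *map + (start - *map_start).
 */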
3420 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3421 unsigned long min_len,
3422 char **token, char **map,
3423 unsigned long *map_start,
3424 unsigned long *map_len, int km)
3425 {
3426 int err;
3427 int save = 0;
3428 if (eb->map_token) {
3429 unmap_extent_buffer(eb, eb->map_token, km);
3430 eb->map_token = NULL;
3431 save = 1;
3432 }
3433 err = map_private_extent_buffer(eb, start, min_len, token, map,
3434 map_start, map_len, km);
3435 if (!err && save) {
3436 eb->map_token = *token;
3437 eb->kaddr = *map;
3438 eb->map_start = *map_start;
3439 eb->map_len = *map_len;
3440 }
3441 return err;
3442 }
3444 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3445 {
3446 kunmap_atomic(token, km);
3447 }
3449 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3450 unsigned long start,
3451 unsigned long len)
3452 {
3453 size_t cur;
3454 size_t offset;
3455 struct page *page;
3456 char *kaddr;
3457 char *ptr = (char *)ptrv;
3458 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3459 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3460 int ret = 0;
3462 WARN_ON(start > eb->len);
3463 WARN_ON(start + len > eb->start + eb->len);
3465 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3467 while (len > 0) {
3468 page = extent_buffer_page(eb, i);
3470 cur = min(len, (PAGE_CACHE_SIZE - offset));
3472 kaddr = kmap_atomic(page, KM_USER0);
3473 ret = memcmp(ptr, kaddr + offset, cur);
3474 kunmap_atomic(kaddr, KM_USER0);
3475 if (ret)
3476 break;
3478 ptr += cur;
3479 len -= cur;
3480 offset = 0;
3481 i++;
3482 }
3483 return ret;
3484 }
3486 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3487 unsigned long start, unsigned long len)
3488 {
3489 size_t cur;
3490 size_t offset;
3491 struct page *page;
3492 char *kaddr;
3493 char *src = (char *)srcv;
3494 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3495 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3497 WARN_ON(start > eb->len);
3498 WARN_ON(start + len > eb->start + eb->len);
3500 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3502 while (len > 0) {
3503 page = extent_buffer_page(eb, i);
3504 WARN_ON(!PageUptodate(page));
3506 cur = min(len, PAGE_CACHE_SIZE - offset);
3507 kaddr = kmap_atomic(page, KM_USER1);
3508 memcpy(kaddr + offset, src, cur);
3509 kunmap_atomic(kaddr, KM_USER1);
3511 src += cur;
3512 len -= cur;
3513 offset = 0;
3514 i++;
3515 }
3516 }
3518 void memset_extent_buffer(struct extent_buffer *eb, char c,
3519 unsigned long start, unsigned long len)
3520 {
3521 size_t cur;
3522 size_t offset;
3523 struct page *page;
3524 char *kaddr;
3525 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3526 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3528 WARN_ON(start > eb->len);
3529 WARN_ON(start + len > eb->start + eb->len);
3531 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3533 while (len > 0) {
3534 page = extent_buffer_page(eb, i);
3535 WARN_ON(!PageUptodate(page));
3537 cur = min(len, PAGE_CACHE_SIZE - offset);
3538 kaddr = kmap_atomic(page, KM_USER0);
3539 memset(kaddr + offset, c, cur);
3540 kunmap_atomic(kaddr, KM_USER0);
3542 len -= cur;
3543 offset = 0;
3544 i++;
3545 }
3546 }
3548 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3549 unsigned long dst_offset, unsigned long src_offset,
3550 unsigned long len)
3551 {
3552 u64 dst_len = dst->len;
3553 size_t cur;
3554 size_t offset;
3555 struct page *page;
3556 char *kaddr;
3557 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3558 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3560 WARN_ON(src->len != dst_len);
3562 offset = (start_offset + dst_offset) &
3563 ((unsigned long)PAGE_CACHE_SIZE - 1);
3565 while (len > 0) {
3566 page = extent_buffer_page(dst, i);
3567 WARN_ON(!PageUptodate(page));
3569 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3571 kaddr = kmap_atomic(page, KM_USER0);
3572 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3573 kunmap_atomic(kaddr, KM_USER0);
3575 src_offset += cur;
3576 len -= cur;
3577 offset = 0;
3578 i++;
3579 }
3580 }
3582 static void move_pages(struct page *dst_page, struct page *src_page,
3583 unsigned long dst_off, unsigned long src_off,
3584 unsigned long len)
3585 {
3586 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3587 if (dst_page == src_page) {
3588 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3589 } else {
3590 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3591 char *p = dst_kaddr + dst_off + len;
3592 char *s = src_kaddr + src_off + len;
3594 while (len--)
3595 *--p = *--s;
3597 kunmap_atomic(src_kaddr, KM_USER1);
3598 }
3599 kunmap_atomic(dst_kaddr, KM_USER0);
3600 }
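/*
 * Added note: for the cross-page case move_pages() copies byte by byte
 * from the high end down, so overlapping moves whose destination sits
 * above the source stay safe even when the overlap spans a page boundary;
 * the same-page case can simply rely on memmove().
 */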
3602 static void copy_pages(struct page *dst_page, struct page *src_page,
3603 unsigned long dst_off, unsigned long src_off,
3604 unsigned long len)
3605 {
3606 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3607 char *src_kaddr;
3609 if (dst_page != src_page)
3610 src_kaddr = kmap_atomic(src_page, KM_USER1);
3611 else
3612 src_kaddr = dst_kaddr;
3614 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3615 kunmap_atomic(dst_kaddr, KM_USER0);
3616 if (dst_page != src_page)
3617 kunmap_atomic(src_kaddr, KM_USER1);
3618 }
3620 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3621 unsigned long src_offset, unsigned long len)
3622 {
3623 size_t cur;
3624 size_t dst_off_in_page;
3625 size_t src_off_in_page;
3626 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3627 unsigned long dst_i;
3628 unsigned long src_i;
3630 if (src_offset + len > dst->len) {
3631 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3632 "len %lu dst len %lu\n", src_offset, len, dst->len);
3633 BUG_ON(1);
3634 }
3635 if (dst_offset + len > dst->len) {
3636 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3637 "len %lu dst len %lu\n", dst_offset, len, dst->len);
3638 BUG_ON(1);
3639 }
3641 while (len > 0) {
3642 dst_off_in_page = (start_offset + dst_offset) &
3643 ((unsigned long)PAGE_CACHE_SIZE - 1);
3644 src_off_in_page = (start_offset + src_offset) &
3645 ((unsigned long)PAGE_CACHE_SIZE - 1);
3647 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3648 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3650 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3651 src_off_in_page));
3652 cur = min_t(unsigned long, cur,
3653 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3655 copy_pages(extent_buffer_page(dst, dst_i),
3656 extent_buffer_page(dst, src_i),
3657 dst_off_in_page, src_off_in_page, cur);
3659 src_offset += cur;
3660 dst_offset += cur;
3661 len -= cur;
3662 }
3663 }
3665 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3666 unsigned long src_offset, unsigned long len)
3667 {
3668 size_t cur;
3669 size_t dst_off_in_page;
3670 size_t src_off_in_page;
3671 unsigned long dst_end = dst_offset + len - 1;
3672 unsigned long src_end = src_offset + len - 1;
3673 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3674 unsigned long dst_i;
3675 unsigned long src_i;
3677 if (src_offset + len > dst->len) {
3678 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3679 "len %lu len %lu\n", src_offset, len, dst->len);
3680 BUG_ON(1);
3681 }
3682 if (dst_offset + len > dst->len) {
3683 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3684 "len %lu len %lu\n", dst_offset, len, dst->len);
3685 BUG_ON(1);
3686 }
3687 if (dst_offset < src_offset) {
3688 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3689 return;
3690 }
3691 while (len > 0) {
3692 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3693 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3695 dst_off_in_page = (start_offset + dst_end) &
3696 ((unsigned long)PAGE_CACHE_SIZE - 1);
3697 src_off_in_page = (start_offset + src_end) &
3698 ((unsigned long)PAGE_CACHE_SIZE - 1);
3700 cur = min_t(unsigned long, len, src_off_in_page + 1);
3701 cur = min(cur, dst_off_in_page + 1);
3702 move_pages(extent_buffer_page(dst, dst_i),
3703 extent_buffer_page(dst, src_i),
3704 dst_off_in_page - cur + 1,
3705 src_off_in_page - cur + 1, cur);
3707 dst_end -= cur;
3708 src_end -= cur;
3709 len -= cur;
3710 }
3711 }
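/*
 * Worked example (added): moving 6000 bytes from offset 1000 to offset
 * 3000 overlaps in 3000..6999 with dst > src, so the loop above walks from
 * the tails (dst_end = 8999, src_end = 6999) copying per-page chunks
 * backwards; a forward copy would clobber bytes 3000..6999 before reading
 * them. The dst < src case safely falls through to memcpy_extent_buffer().
 */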
3713 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3714 {
3715 u64 start = page_offset(page);
3716 struct extent_buffer *eb;
3717 int ret = 1;
3718 unsigned long i;
3719 unsigned long num_pages;
3721 spin_lock(&tree->buffer_lock);
3722 eb = buffer_search(tree, start);
3723 if (!eb)
3724 goto out;
3726 if (atomic_read(&eb->refs) > 1) {
3727 ret = 0;
3728 goto out;
3729 }
3730 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3731 ret = 0;
3732 goto out;
3733 }
3734 /* at this point we can safely release the extent buffer */
3735 num_pages = num_extent_pages(eb->start, eb->len);
3736 for (i = 0; i < num_pages; i++)
3737 page_cache_release(extent_buffer_page(eb, i));
3738 rb_erase(&eb->rb_node, &tree->buffer);
3739 __free_extent_buffer(eb);
3740 out:
3741 spin_unlock(&tree->buffer_lock);
3742 return ret;
3743 }