/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
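
/*
 * Ordered extents track file ranges with writes in flight.  Each inode keeps
 * them in a private rb-tree (btrfs_ordered_inode_tree), keyed by file offset,
 * so the IO completion paths below can find and finish them.
 */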
/* returns the end byte past this extent, clamped on u64 overflow */
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
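
/*
 * Entries in the tree never overlap, so any offset inside
 * [file_offset, entry_end) matches exactly one entry.  tree_insert() below
 * relies on this to report collisions to its caller.
 */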
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * look for a given offset in the tree, and if it can't be found return the
 * closest slot in the tree to where the record should go.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
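
/*
 * helper to check if the range [file_offset, file_offset + len) overlaps
 * the entry at all
 */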
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
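
/*
 * Note that tree_search() caches its result in tree->last, so repeated
 * lookups for offsets inside the same ordered extent avoid the full
 * rb-tree walk.
 */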
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = inode;
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	BUG_ON(node);
	spin_unlock(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}
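
/* wrapper for buffered writes: no direct IO flag and no compression */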
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}
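
/* wrapper for direct IO: sets BTRFS_ORDERED_DIRECT on the new extent */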
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}
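
/* wrapper for compressed writes: records the compression type used */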
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock(&tree->lock);
	return 0;
}
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}
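
/*
 * A rough sketch of how the write-completion path is expected to use the
 * dec_test helpers above (the real caller is the ordered IO finishing code
 * in inode.c; details there may differ):
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	// returns 1 only once, when the last IO in the extent completes
 *	if (!btrfs_dec_test_ordered_pending(inode, &ordered, start, len))
 *		return;
 *	// insert file extent items and checksums, then drop the entry
 *	// and the reference the tree held
 *	btrfs_remove_ordered_extent(inode, ordered);
 *	btrfs_put_ordered_extent(ordered);
 */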
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and you must wake_up entry->wait.  You must hold the tree lock
 * while you call this function.
 */
static int __btrfs_remove_ordered_extent(struct inode *inode,
					 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}
/*
 * remove an ordered extent from the tree.  No references are dropped
 * but any waiters are woken.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	ret = __btrfs_remove_ordered_extent(inode, entry);
	spin_unlock(&tree->lock);
	wake_up(&entry->wait);

	return ret;
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root,
			       int nocow_only, int delay_iput)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}
/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			btrfs_add_delayed_iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback.  Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
				    EXTENT_DELALLOC, 0, NULL)) {
		schedule_timeout(1);
		goto again;
	}
	return 0;
}
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock(&tree->lock);
	return entry;
}
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size)
		goto out;

	/*
	 * we can't update the disk_isize if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size, offset - 1,
			   EXTENT_DELALLOC, 0, NULL))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct cover this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	while (node) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
		node = rb_prev(node);
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	if (ordered) {
		node = rb_next(&ordered->rb_node);
	} else {
		if (prev)
			node = rb_next(prev);
		else
			node = rb_first(&tree->tree);
	}
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > offset)
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size;
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > offset &&
	    !test_range_bit(io_tree, offset, i_size_test - 1,
			    EXTENT_DELALLOC, 0, NULL)) {
		new_i_size = min_t(u64, i_size_test, i_size);
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * we need to remove the ordered extent with the tree lock held
	 * so that other people calling this function don't find our fully
	 * processed ordered entry and skip updating the i_size
	 */
	if (ordered)
		__btrfs_remove_ordered_extent(inode, ordered);
	spin_unlock(&tree->lock);
	if (ordered)
		wake_up(&ordered->wait);
	return ret;
}
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}