/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
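
/*
 * Note: entry_end() saturates to (u64)-1 when file_offset + len would
 * overflow, so the returned end offset can never wrap back below
 * file_offset.
 */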
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
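
/*
 * helper to check if the range [file_offset, file_offset + len) overlaps
 * the range covered by an entry; both comparisons below treat a touching
 * boundary as non-overlapping.
 */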
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
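
/*
 * Note: tree->last caches the node returned by the previous search so
 * repeated lookups at nearby offsets can skip the full rbtree walk.
 * Every caller in this file holds tree->lock across the search, which
 * is what keeps the cached pointer from going stale.
 */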
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = inode;
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	spin_lock(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	BUG_ON(node);
	spin_unlock(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}
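
/*
 * Rough lifecycle (a sketch, not a complete write path): the ordered
 * extent is created before pages are handed to the block layer, the
 * endio code calls one of the btrfs_dec_test_*_ordered_pending()
 * helpers as IO completes, and once bytes_left hits zero the caller
 * inserts the file extent items and drops both references:
 *
 *	btrfs_add_ordered_extent(inode, file_offset, start, len, len, type);
 *	...
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, file_offset, len)) {
 *		... insert file extent items and checksums ...
 *		btrfs_remove_ordered_extent(inode, ordered);
 *		btrfs_put_ordered_extent(ordered);	/* ref from dec_test */
 *		btrfs_put_ordered_extent(ordered);	/* ref from the tree */
 *	}
 */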
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
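
/*
 * The three wrappers above differ only in how they flag the new entry:
 * the plain version uses buffered-IO defaults, _dio sets
 * BTRFS_ORDERED_DIRECT, and _compress records the compression type used
 * for the extent.
 */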
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock(&tree->lock);
	return 0;
}
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}
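
/*
 * Usage sketch (hypothetical caller): because *file_offset advances past
 * each range it accounts, an endio handler whose IO spans several ordered
 * extents can keep calling until the whole range has been consumed:
 *
 *	u64 cur = start;
 *	while (btrfs_dec_test_first_ordered_pending(inode, &ordered, &cur,
 *						    start + len - cur)) {
 *		complete_one(ordered);		/* hypothetical helper */
 *		btrfs_put_ordered_extent(ordered);
 *		if (cur >= start + len)
 *			break;
 *	}
 */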
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}
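
/*
 * Note on reference counting: the tree itself holds one reference (taken
 * in __btrfs_add_ordered_extent), and each lookup helper below that
 * returns an entry takes another.  Every such lookup must be paired with
 * a btrfs_put_ordered_extent() by the caller.
 */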
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and you must wake_up entry->wait.  You must hold the tree lock
 * while you call this function.
 */
static int __btrfs_remove_ordered_extent(struct inode *inode,
					 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}
/*
 * remove an ordered extent from the tree.  No references are dropped
 * but any waiters are woken.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	ret = __btrfs_remove_ordered_extent(inode, entry);
	spin_unlock(&tree->lock);
	wake_up(&entry->wait);

	return ret;
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root,
			       int nocow_only, int delay_iput)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}
/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			btrfs_add_delayed_iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback.  Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
				    EXTENT_DELALLOC, 0, NULL)) {
		schedule_timeout(1);
		goto again;
	}
	return 0;
}
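
/*
 * The loop above walks the range backward: after waiting on one ordered
 * extent it moves 'end' to just before that extent's file_offset, and
 * the found/delalloc re-check at the bottom restarts the whole range if
 * new IO appeared while we slept.
 */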
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock(&tree->lock);
	return entry;
}
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size)
		goto out;

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size, offset - 1,
			   EXTENT_DELALLOC, 0, NULL))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct cover this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	while (node) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
		node = rb_prev(node);
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	if (ordered) {
		node = rb_next(&ordered->rb_node);
	} else {
		if (prev)
			node = rb_next(prev);
		else
			node = rb_first(&tree->tree);
	}
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > offset)
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size;
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > offset &&
	    !test_range_bit(io_tree, offset, i_size_test - 1,
			    EXTENT_DELALLOC, 0, NULL)) {
		new_i_size = min_t(u64, i_size_test, i_size);
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * we need to remove the ordered extent with the tree lock held
	 * so that other people calling this function don't find our fully
	 * processed ordered entry and skip updating the i_size
	 */
	if (ordered)
		__btrfs_remove_ordered_extent(inode, ordered);
	spin_unlock(&tree->lock);
	if (ordered)
		wake_up(&ordered->wait);
	return ret;
}
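
/*
 * In short, disk_i_size only moves forward when three things hold: the
 * new offset is past the current disk_i_size, no delalloc bytes sit in
 * the gap, and no other ordered extent still covers any part of it.
 * Every other case falls through to 'out' and leaves disk_i_size alone.
 */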
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}