/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	/* guard against overflow for extents that reach the end of the file */
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
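
/*
 * For example, an entry with file_offset == 4096 and len == 8192 has
 * entry_end() == 12288, so offsets 4096..12287 all map to that entry in
 * the rb-tree searches below.
 */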

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
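
/*
 * Note that a non-NULL return means the new node overlaps an existing
 * entry; __btrfs_add_ordered_extent() below treats that as -EEXIST and
 * panics, since overlapping ordered extents indicate tree corruption.
 */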

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	/* advance past entries that end at or before file_offset */
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	/* then back up over entries that still end past file_offset */
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * helper to check if a given range overlaps a given entry
 */
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
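
/*
 * tree->last is a one-entry lookup cache: repeated searches near the same
 * offset (the common pattern as IO completes front to back) hit it and
 * skip the rb-tree walk entirely.  All callers in this file hold
 * tree->lock around it.
 */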

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}
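
/*
 * A minimal sketch of how a write path uses this (the names and values
 * here are hypothetical): after reserving a disk extent, the caller pins
 * the range with
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_start,
 *				       num_bytes, num_bytes, 0);
 *
 * passing type 0 for a plain write.  The extent then tracks the range
 * until IO completion calls one of the btrfs_dec_test_*_ordered_pending()
 * helpers below.
 */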

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}
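
/*
 * For example, an ordered extent created with disk_len == 16384 starts
 * with csum_bytes_left == 16384 (unless checksums are disabled for the
 * inode).  Two calls adding sums of len 8192 each bring it to zero and
 * wake anybody waiting for the checksums to be complete.
 */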

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
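
/*
 * Illustration: with ordered extents covering [0, 4096) and [4096, 8192),
 * completing an 8192 byte IO at *file_offset == 0 only accounts the first
 * extent and leaves *file_offset at 4096, so the caller loops, calling
 * this again until the whole completed range has been charged.
 */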

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		/* drop the lock while we wait for the IO to finish */
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
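
/*
 * Reference counting recap: the tree holds one reference from
 * __btrfs_add_ordered_extent(), and every lookup helper that returns an
 * entry takes another with atomic_inc(&entry->refs).  The final put frees
 * any checksums still queued on entry->list along with the entry itself.
 */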

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice, works;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered, *next;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			ordered->flush_work.func = btrfs_run_ordered_extent_work;
			list_add_tail(&ordered->work_list, &works);
			btrfs_queue_worker(&root->fs_info->flush_workers,
					   &ordered->flush_work);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);

		inode = ordered->inode;
		btrfs_put_ordered_extent(ordered);
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);

		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
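
/*
 * The two-phase shape above is deliberate: every extent is queued to the
 * flush workers while the lock is held only briefly, and the waiting on
 * each completion happens afterwards with no locks held, so new ordered
 * extents can still be created while old ones are flushed.
 */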

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_extent_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_extent_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}
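
/*
 * The loop above walks the range back to front: each pass waits for the
 * ordered extent covering 'end', then re-probes just below that extent's
 * file_offset, so for example a wait over [0, 1MiB) with three ordered
 * extents pending finishes them last to first before returning.
 */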

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct cover this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * outstanding i_size, or we will not know the real
			 * i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
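
/*
 * A worked example: i_size is 100K and disk_i_size is 0.  If the ordered
 * extent for [0, 64K) finishes first, the backward walk finds nothing
 * pending below it and disk_i_size becomes 64K.  If instead [64K, 100K)
 * finishes first, the walk finds [0, 64K) still pending and only records
 * 100K in that extent's outstanding_isize, deferring the disk_i_size
 * bump until [0, 64K) completes.
 */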

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}
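
/*
 * This covers the window where a page has been written and reclaimed but
 * its checksum is still queued on the in-flight ordered extent: a read of
 * that page looks the sum up here by disk_bytenr instead of in the csum
 * tree.  A return of 1 means no pending sum exists for the sector.
 */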

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}