/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
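
/*
 * Note that entry_end() saturates rather than wrapping: e.g. an entry
 * with file_offset == U64_MAX - 4096 and len == 8192 reports an end of
 * (u64)-1, not 4095, which keeps the rb-tree comparisons below ordered
 * even for degenerate ranges.
 */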

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
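
/*
 * Caller sketch for tree_insert(), mirroring its use in
 * __btrfs_add_ordered_extent() below: a non-NULL return means an
 * existing extent already covers file_offset, which callers treat as
 * a fatal inconsistency.
 *
 *	node = tree_insert(&tree->tree, file_offset, &entry->rb_node);
 *	if (node)
 *		ordered_data_tree_panic(inode, -EEXIST, file_offset);
 */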

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
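
/*
 * tree->last acts as a one-entry lookup cache: sequential IO completion
 * tends to hit the same ordered extent repeatedly, so remembering the
 * last node returned lets those lookups skip the rb-tree walk entirely.
 * It is only read or written under tree->lock, and it is cleared in
 * btrfs_remove_ordered_extent() when the cached node is erased.
 */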

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}
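
/*
 * The three wrappers below differ only in the dio flag and compress_type
 * they pass down.  A minimal caller sketch (offsets and lengths are
 * illustrative, not taken from this file):
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_start,
 *				       num_bytes, num_bytes,
 *				       BTRFS_ORDERED_NOCOW);
 *	if (ret)
 *		return ret;	// -ENOMEM is the only error returned;
 *				// a duplicate range panics instead
 */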

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}
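
/*
 * csum_bytes_left counts down from disk_len as checksum lists arrive,
 * so a waiter that needs all the csums for this extent can sleep on
 * entry->wait until it reaches zero.  For example, an extent with
 * disk_len 16384 completes its csum accounting once btrfs_ordered_sum
 * entries covering all 16384 bytes have been added, whether that takes
 * one call or several.
 */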

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordered accounting left %llu size %llu",
			entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
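
/*
 * Note the return convention above: the function returns 1 only on the
 * call that drives bytes_left to zero and wins the test_and_set_bit()
 * race, so exactly one completion path per ordered extent goes on to
 * finish it.  Because *file_offset is advanced to dec_end, a caller
 * whose IO spans several ordered extents can simply call this in a
 * loop until the whole range is consumed.
 */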

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
			      struct list_head *logged_list)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (!list_empty(&ordered->log_list))
			continue;
		list_add_tail(&ordered->log_list, logged_list);
		atomic_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
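
/*
 * The drop-and-retake of log_extents_lock around wait_event() above is
 * deliberate: sleeping under a spinlock is forbidden, and because each
 * entry is detached with list_del_init() before the lock is dropped,
 * restarting from list_first_entry() after re-locking is safe even if
 * other tasks added or removed entries in the meantime.
 */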

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
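
/*
 * btrfs_add_delayed_iput() is used above instead of a plain iput()
 * because the final reference is often dropped from IO completion
 * context, where evicting the inode (which can block and take locks)
 * would be unsafe; the iput is deferred to a helper running in process
 * context.
 */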

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		spin_lock(&root->fs_info->ordered_root_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
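
/*
 * The splice-then-requeue pattern above snapshots root->ordered_extents
 * under the lock, then feeds at most nr entries to the flush workers
 * while moving them back onto the live list.  Passing nr == -1 means
 * "no limit": the (nr != -1) check keeps the loop condition truthy so
 * every spliced extent gets queued.
 */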

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the IO.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return.
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the IO completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;
	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		ret = filemap_fdatawrite_range(inode->i_mapping, start,
					       orig_end);
		if (ret)
			return ret;
	}
	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret;
}
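
/*
 * The loop above walks the range backwards: each iteration looks up an
 * ordered extent at or before end, waits for it to complete, then
 * steps end to just before that extent's start.  Walking from the end
 * of the range toward start means extents that complete and are
 * removed from the tree during the wait are not skipped over.
 */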

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * pending i_size.  Otherwise we will not know the
			 * real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
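
/*
 * Example of the ordering constraint handled above: with 4k sectors,
 * if writes covering [0,4k) and [8k,12k) have finished but [4k,8k) is
 * still in flight, disk_i_size cannot move past 4k.  The 12k endpoint
 * is parked in the in-flight extent's outstanding_isize instead, and
 * is picked up once that middle extent completes.
 */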

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors * sizeof(u32));

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
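
/*
 * Index math sketch for the lookup above, assuming 4096-byte sectors
 * (so s_blocksize_bits == 12): for ordered_sum->bytenr == 1M and
 * disk_bytenr == 1M + 8192, i == (8192 >> 12) == 2, i.e. the copy
 * starts at the third per-sector csum in ordered_sum->sums.
 */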

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod <= root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}