/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}
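/*
 * Illustration (editor's note): with file_offset = (u64)-8 and len = 16,
 * the addition wraps past zero, so entry_end() reports (u64)-1 instead
 * of a bogus small offset.
 */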
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}
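/*
 * Editor's note: an entry covers the half-open byte range
 * [file_offset, file_offset + len).  For an entry at 4096 with len 4096,
 * offset 8191 is inside but 8192 is not.
 */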
/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kzalloc(sizeof(*entry), GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        mutex_lock(&tree->mutex);
        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = inode;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);

        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        BUG_ON(node);

        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &BTRFS_I(inode)->root->fs_info->ordered_extents);
        spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

        mutex_unlock(&tree->mutex);
        return 0;
}
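/*
 * Editor's sketch of a typical caller (assumed from the flag check above
 * and the delalloc write paths in inode.c): after reserving disk space
 * for a dirty range, a COW writer registers it before submitting IO:
 *
 *      ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *                                     ram_size, cur_alloc_size, 0);
 *
 * ram_size and cur_alloc_size are illustrative names.  A type of 0
 * (BTRFS_ORDERED_IO_DONE) sets no extra flag bit; nocow and prealloc
 * writers pass BTRFS_ORDERED_NOCOW or BTRFS_ORDERED_PREALLOC instead.
 */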
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
                          struct btrfs_ordered_extent *entry,
                          struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        list_add_tail(&sum->list, &entry->list);
        mutex_unlock(&tree->mutex);
        return 0;
}
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   u64 file_offset, u64 io_size)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
                       (unsigned long long)entry->bytes_left,
                       (unsigned long long)io_size);
        }
        entry->bytes_left -= io_size;
        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
                ret = 1;
out:
        mutex_unlock(&tree->mutex);
        return ret == 0;
}
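/*
 * Editor's note on the return convention: ret is inverted by the final
 * "return ret == 0", and BTRFS_ORDERED_IO_DONE is claimed with
 * test_and_set_bit, so exactly one completion path sees 1 for a given
 * ordered extent.  A sketch of the expected endio usage:
 *
 *      if (btrfs_dec_test_ordered_pending(inode, start, end - start + 1))
 *              finish_the_ordered_extent(inode, start);
 *
 * (finish_the_ordered_extent is a hypothetical helper standing in for
 * whatever completion work the caller does.)
 */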
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        if (atomic_dec_and_test(&entry->refs)) {
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kfree(entry);
        }
        return 0;
}
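/*
 * Editor's note: references must pair with puts.  The tree holds the
 * reference taken in btrfs_add_ordered_extent(), and the lookup helpers
 * below take their own, so a typical user looks like:
 *
 *      ordered = btrfs_lookup_ordered_extent(inode, offset);
 *      if (ordered) {
 *              ... use ordered ...
 *              btrfs_put_ordered_extent(ordered);
 *      }
 */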
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and you must wake_up entry->wait.  You must hold the tree mutex
 * while you call this function.
 */
static int __btrfs_remove_ordered_extent(struct inode *inode,
                                         struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

        spin_lock(&BTRFS_I(inode)->accounting_lock);
        BTRFS_I(inode)->outstanding_extents--;
        spin_unlock(&BTRFS_I(inode)->accounting_lock);
        btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root,
                                              inode, 1);

        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);

        /*
         * we have no more ordered extents for this inode and
         * no dirty pages.  We can safely remove it from the
         * list of ordered extents
         */
        if (RB_EMPTY_ROOT(&tree->tree) &&
            !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                list_del_init(&BTRFS_I(inode)->ordered_operations);
        }
        spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

        return 0;
}
/*
 * remove an ordered extent from the tree.  No references are dropped
 * but any waiters are woken.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        ret = __btrfs_remove_ordered_extent(inode, entry);
        mutex_unlock(&tree->mutex);
        wake_up(&entry->wait);

        return ret;
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root,
                               int nocow_only, int delay_iput)
{
        struct list_head splice;
        struct list_head *cur;
        struct btrfs_ordered_extent *ordered;
        struct inode *inode;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->ordered_extent_lock);
        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
                cur = splice.next;
                ordered = list_entry(cur, struct btrfs_ordered_extent,
                                     root_extent_list);
                if (nocow_only &&
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
                        list_move(&ordered->root_extent_list,
                                  &root->fs_info->ordered_extents);
                        cond_resched_lock(&root->fs_info->ordered_extent_lock);
                        continue;
                }

                list_del_init(&ordered->root_extent_list);
                atomic_inc(&ordered->refs);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(ordered->inode);

                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                        if (delay_iput)
                                btrfs_add_delayed_iput(inode);
                        else
                                iput(inode);
                } else {
                        btrfs_put_ordered_extent(ordered);
                }

                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
        return 0;
}
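/*
 * Editor's note (assumed from the comment above): the relocation and
 * balance code is the expected caller, flushing every in-flight ordered
 * extent in the root before block groups are moved, e.g.
 *
 *      btrfs_wait_ordered_extents(root, 0, 0);
 *
 * delay_iput = 1 is for callers that cannot safely take the final iput
 * themselves; it defers the drop to btrfs_add_delayed_iput().
 */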
/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
        struct btrfs_inode *btrfs_inode;
        struct inode *inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
again:
        list_splice_init(&root->fs_info->ordered_operations, &splice);

        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         ordered_operations);

                inode = &btrfs_inode->vfs_inode;

                list_del_init(&btrfs_inode->ordered_operations);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(inode);

                if (!wait && inode) {
                        list_add_tail(&BTRFS_I(inode)->ordered_operations,
                                      &root->fs_info->ordered_operations);
                }
                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        if (wait)
                                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                        else
                                filemap_flush(inode->i_mapping);
                        btrfs_add_delayed_iput(inode);
                }

                cond_resched();
                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        if (wait && !list_empty(&root->fs_info->ordered_operations))
                goto again;

        spin_unlock(&root->fs_info->ordered_extent_lock);
        mutex_unlock(&root->fs_info->ordered_operations_mutex);

        return 0;
}
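/*
 * Editor's sketch (assumed from the commit path in transaction.c): the
 * transaction commit makes an asynchronous pass first and a blocking
 * pass once the transaction is locked down:
 *
 *      btrfs_run_ordered_operations(root, 0);    start the IO
 *      ...
 *      btrfs_run_ordered_operations(root, 1);    wait for all of it
 */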
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry,
                                int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for pdflush to find them
         */
        filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}
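/*
 * Editor's note: BTRFS_ORDERED_COMPLETE is set by
 * __btrfs_remove_ordered_extent(), so waiting here blocks until the
 * extent has been removed from the tree, not merely until page
 * writeback clears.  Callers that only want the IO kicked off pass
 * wait == 0.
 */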
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        u64 end;
        u64 orig_end;
        u64 wait_end;
        struct btrfs_ordered_extent *ordered;
        int found;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }
        wait_end = orig_end;
again:
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        /* The compression code will leave pages locked but return from
         * writepage without setting the page writeback.  Starting again
         * with WB_SYNC_ALL will end up waiting for the IO to actually start.
         */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        found = 0;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len < start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                found++;
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
                                    EXTENT_DELALLOC, 0, NULL)) {
                schedule_timeout(1);
                goto again;
        }
        return 0;
}
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test;
        u64 i_size = i_size_read(inode);
        struct rb_node *node;
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;

        if (ordered)
                offset = entry_end(ordered);
        else
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

        mutex_lock(&tree->mutex);
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /* truncate file */
        if (disk_i_size > i_size) {
                BTRFS_I(inode)->disk_i_size = i_size;
                ret = 0;
                goto out;
        }

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size == i_size || offset <= disk_i_size) {
                goto out;
        }

        /*
         * we can't update the disk_i_size if there are delalloc bytes
         * between disk_i_size and this ordered extent
         */
        if (test_range_bit(io_tree, disk_i_size, offset - 1,
                           EXTENT_DELALLOC, 0, NULL)) {
                goto out;
        }

        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        if (ordered) {
                node = rb_prev(&ordered->rb_node);
        } else {
                prev = tree_search(tree, offset);
                /*
                 * we insert file extents without involving ordered struct,
                 * so there should be no ordered struct covering this offset
                 */
                if (prev) {
                        test = rb_entry(prev, struct btrfs_ordered_extent,
                                        rb_node);
                        BUG_ON(offset_in_entry(test, offset));
                }
                node = prev;
        }
        while (node) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
                node = rb_prev(node);
        }
        new_i_size = min_t(u64, offset, i_size);

        /*
         * at this point, we know we can safely update i_size to at least
         * the offset from this ordered extent.  But, we need to
         * walk forward and see if ios from higher up in the file have
         * finished.
         */
        if (ordered) {
                node = rb_next(&ordered->rb_node);
        } else {
                if (prev)
                        node = rb_next(prev);
                else
                        node = rb_first(&tree->tree);
        }
        i_size_test = 0;
        if (node) {
                /*
                 * do we have an area where IO might have finished
                 * between our ordered extent and the next one.
                 */
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset > offset)
                        i_size_test = test->file_offset;
        } else {
                i_size_test = i_size;
        }

        /*
         * i_size_test is the end of a region after this ordered
         * extent where there are no ordered extents.  As long as there
         * are no delalloc bytes in this area, it is safe to update
         * disk_i_size to the end of the region.
         */
        if (i_size_test > offset &&
            !test_range_bit(io_tree, offset, i_size_test - 1,
                            EXTENT_DELALLOC, 0, NULL)) {
                new_i_size = min_t(u64, i_size_test, i_size);
        }
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
out:
        /*
         * we need to remove the ordered extent with the tree lock held
         * so that other people calling this function don't find our fully
         * processed ordered entry and skip updating the i_size
         */
        if (ordered)
                __btrfs_remove_ordered_extent(inode, ordered);
        mutex_unlock(&tree->mutex);
        if (ordered)
                wake_up(&ordered->wait);
        return ret;
}
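/*
 * Editor's worked example of the forward walk above: suppose disk_i_size
 * is 0, our finished ordered extent covers [0, 4096), and the next
 * ordered extent in the tree starts at 16384.  Then offset is 4096 and
 * i_size_test is 16384, so if [4096, 16383] holds no delalloc bytes the
 * function publishes disk_i_size = min(16384, i_size) rather than
 * stopping at 4096.
 */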
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_sector_sum *sector_sums;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int ret = 1;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 1;

        mutex_lock(&tree->mutex);
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr) {
                        num_sectors = ordered_sum->len / sectorsize;
                        sector_sums = ordered_sum->sums;
                        for (i = 0; i < num_sectors; i++) {
                                if (sector_sums[i].bytenr == disk_bytenr) {
                                        *sum = sector_sums[i].sum;
                                        ret = 0;
                                        goto out;
                                }
                        }
                }
        }
out:
        mutex_unlock(&tree->mutex);
        btrfs_put_ordered_extent(ordered);
        return ret;
}
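/*
 * Editor's note: the lookup above is paired with btrfs_put_ordered_extent()
 * before returning, and the return convention is inverted from the lookup
 * helpers: 0 means the checksum was found and stored in *sum, 1 means the
 * caller must fall back to reading it from the csum btree.
 */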
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct inode *inode)
{
        u64 last_mod;

        last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

        /*
         * if this file hasn't been changed since the last transaction
         * commit, we can safely return without doing anything
         */
        if (last_mod < root->fs_info->last_trans_committed)
                return 0;

        /*
         * the transaction is already committing.  Just start the IO and
         * don't bother with all of this list nonsense
         */
        if (trans && root->fs_info->running_transaction->blocked) {
                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                return 0;
        }

        spin_lock(&root->fs_info->ordered_extent_lock);
        if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
                list_add_tail(&BTRFS_I(inode)->ordered_operations,
                              &root->fs_info->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);

        return 0;
}