/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
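
/*
 * BTRFS_ROOT_TRANS_TAG is a radix tree tag: record_root_in_trans() sets it
 * on fs roots touched in the current transaction, and commit_fs_roots()
 * walks and clears it at commit time.
 */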
#define BTRFS_ROOT_TRANS_TAG 0

static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}
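
/*
 * Note on the allocation above: kmem_cache_alloc(..., GFP_NOFS) may sleep,
 * so join_transaction drops trans_lock around it and re-checks
 * running_transaction afterwards.  Whoever loses the race frees the spare
 * allocation and joins the winner's transaction instead.
 */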

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}
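
/*
 * The smp_rmb() above pairs with the smp_wmb() calls in
 * record_root_in_trans(): a reader that sees in_trans_setup == 0 is
 * guaranteed to also see the root->last_trans update, which is what makes
 * the lockless fast path safe.
 */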

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (!cur_trans->blocked)
				break;
			schedule();
		}
		finish_wait(&root->fs_info->transaction_wait, &wait);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(NULL, root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}
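
/*
 * Typical caller pattern (a sketch for illustration, not code from this
 * file): reserve space for the number of items to be modified, do the
 * work, then release the handle:
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify metadata under this handle ...
 *	btrfs_end_transaction(trans, root);
 */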

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		schedule();
	}
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;
	ret = btrfs_block_rsv_check(trans, root,
				    &root->fs_info->global_block_rsv, 0, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}
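
/*
 * The throttle/lock flags map onto the wrappers below: lock == 0 skips the
 * "mark blocked and maybe commit" logic for callers that must not trigger a
 * commit here, and throttle == 1 allows this path to either commit a blocked
 * transaction itself or kick the transaction kthread, then run delayed iputs.
 */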

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
}
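
/*
 * Note that btrfs_write_and_wait_marked_extents() collapses the two error
 * codes into a plain 0/1 result; callers only learn that something failed,
 * not the specific errno.
 */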

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}
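
/*
 * fs_roots_radix_lock is dropped for each tagged root above because the
 * per-root work (log freeing, reloc and orphan updates, the root item
 * update) can block.  Clearing the tag before dropping the lock keeps the
 * gang lookup from returning the same root twice.
 */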

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
	btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
					  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
	btrfs_orphan_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}
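
/*
 * Failures in the snapshot setup are reported through pending->error rather
 * than the return value; in this version the function always returns 0 and
 * callers are expected to check pending->error after the commit.
 */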

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->in_commit)
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->in_commit) {
			finish_wait(&root->fs_info->transaction_blocked_wait,
				    &wait);
			break;
		}
		schedule();
		finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
	}
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->commit_done || (trans->in_commit && !trans->blocked))
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->commit_done ||
		    (trans->in_commit && !trans->blocked)) {
			finish_wait(&root->fs_info->transaction_wait,
				    &wait);
			break;
		}
		schedule();
		finish_wait(&root->fs_info->transaction_wait,
			    &wait);
	}
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}
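
/*
 * schedule_delayed_work() with a zero delay queues do_async_commit() for
 * immediate execution on the system workqueue.  The caller above only waits
 * for the commit to start (or become unblocked), never for it to reach disk.
 */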

/*
 * btrfs_transaction state sequence:
 *	in_commit = 0, blocked = 0  (initial)
 *	in_commit = 1, blocked = 1
 *	blocked = 0
 *	commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_cowonly_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}
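
/*
 * The two put_transaction() calls near the end above drop the last two
 * references: the one this commit path used and the long-lived reference
 * taken in join_transaction() so the transaction would survive until commit.
 */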

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}
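
/*
 * The final argument to btrfs_drop_snapshot() above is update_ref (the
 * second, NULL, is the block reserve): roots written before the mixed
 * backref format cannot have their backrefs updated during the drop, so
 * they are deleted with update_ref == 0.
 */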