2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
20 #include <linux/sched.h>
21 #include <linux/writeback.h>
22 #include <linux/pagemap.h>
25 #include "transaction.h"
27 #include "ref-cache.h"
/*
 * Count of btrfs_transaction structs currently allocated; used only for
 * sanity WARN_ONs (see put_transaction below).
 */
static int total_trans = 0;

/* slab caches for transaction handles/structs, defined in another file */
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

/* radix-tree tag set on fs roots that were touched in the running transaction */
#define BTRFS_ROOT_TRANS_TAG 0
35 static noinline
void put_transaction(struct btrfs_transaction
*transaction
)
37 WARN_ON(transaction
->use_count
== 0);
38 transaction
->use_count
--;
39 if (transaction
->use_count
== 0) {
40 WARN_ON(total_trans
== 0);
42 list_del_init(&transaction
->list
);
43 memset(transaction
, 0, sizeof(*transaction
));
44 kmem_cache_free(btrfs_transaction_cachep
, transaction
);
48 static noinline
int join_transaction(struct btrfs_root
*root
)
50 struct btrfs_transaction
*cur_trans
;
51 cur_trans
= root
->fs_info
->running_transaction
;
53 cur_trans
= kmem_cache_alloc(btrfs_transaction_cachep
,
57 root
->fs_info
->generation
++;
58 root
->fs_info
->last_alloc
= 0;
59 root
->fs_info
->last_data_alloc
= 0;
60 cur_trans
->num_writers
= 1;
61 cur_trans
->num_joined
= 0;
62 cur_trans
->transid
= root
->fs_info
->generation
;
63 init_waitqueue_head(&cur_trans
->writer_wait
);
64 init_waitqueue_head(&cur_trans
->commit_wait
);
65 cur_trans
->in_commit
= 0;
66 cur_trans
->blocked
= 0;
67 cur_trans
->use_count
= 1;
68 cur_trans
->commit_done
= 0;
69 cur_trans
->start_time
= get_seconds();
70 INIT_LIST_HEAD(&cur_trans
->pending_snapshots
);
71 list_add_tail(&cur_trans
->list
, &root
->fs_info
->trans_list
);
72 extent_io_tree_init(&cur_trans
->dirty_pages
,
73 root
->fs_info
->btree_inode
->i_mapping
,
75 spin_lock(&root
->fs_info
->new_trans_lock
);
76 root
->fs_info
->running_transaction
= cur_trans
;
77 spin_unlock(&root
->fs_info
->new_trans_lock
);
79 cur_trans
->num_writers
++;
80 cur_trans
->num_joined
++;
86 static noinline
int record_root_in_trans(struct btrfs_root
*root
)
88 struct btrfs_dirty_root
*dirty
;
89 u64 running_trans_id
= root
->fs_info
->running_transaction
->transid
;
90 if (root
->ref_cows
&& root
->last_trans
< running_trans_id
) {
91 WARN_ON(root
== root
->fs_info
->extent_root
);
92 if (root
->root_item
.refs
!= 0) {
93 radix_tree_tag_set(&root
->fs_info
->fs_roots_radix
,
94 (unsigned long)root
->root_key
.objectid
,
95 BTRFS_ROOT_TRANS_TAG
);
97 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
99 dirty
->root
= kmalloc(sizeof(*dirty
->root
), GFP_NOFS
);
100 BUG_ON(!dirty
->root
);
101 dirty
->latest_root
= root
;
102 INIT_LIST_HEAD(&dirty
->list
);
104 root
->commit_root
= btrfs_root_node(root
);
106 memcpy(dirty
->root
, root
, sizeof(*root
));
107 spin_lock_init(&dirty
->root
->node_lock
);
108 spin_lock_init(&dirty
->root
->list_lock
);
109 mutex_init(&dirty
->root
->objectid_mutex
);
110 INIT_LIST_HEAD(&dirty
->root
->dead_list
);
111 dirty
->root
->node
= root
->commit_root
;
112 dirty
->root
->commit_root
= NULL
;
114 spin_lock(&root
->list_lock
);
115 list_add(&dirty
->root
->dead_list
, &root
->dead_list
);
116 spin_unlock(&root
->list_lock
);
118 root
->dirty_root
= dirty
;
122 root
->last_trans
= running_trans_id
;
127 static void wait_current_trans(struct btrfs_root
*root
)
129 struct btrfs_transaction
*cur_trans
;
131 cur_trans
= root
->fs_info
->running_transaction
;
132 if (cur_trans
&& cur_trans
->blocked
) {
134 cur_trans
->use_count
++;
136 prepare_to_wait(&root
->fs_info
->transaction_wait
, &wait
,
137 TASK_UNINTERRUPTIBLE
);
138 if (cur_trans
->blocked
) {
139 mutex_unlock(&root
->fs_info
->trans_mutex
);
141 mutex_lock(&root
->fs_info
->trans_mutex
);
142 finish_wait(&root
->fs_info
->transaction_wait
,
145 finish_wait(&root
->fs_info
->transaction_wait
,
150 put_transaction(cur_trans
);
154 struct btrfs_trans_handle
*start_transaction(struct btrfs_root
*root
,
155 int num_blocks
, int wait
)
157 struct btrfs_trans_handle
*h
=
158 kmem_cache_alloc(btrfs_trans_handle_cachep
, GFP_NOFS
);
161 mutex_lock(&root
->fs_info
->trans_mutex
);
162 if ((wait
== 1 && !root
->fs_info
->open_ioctl_trans
) || wait
== 2)
163 wait_current_trans(root
);
164 ret
= join_transaction(root
);
167 record_root_in_trans(root
);
168 h
->transid
= root
->fs_info
->running_transaction
->transid
;
169 h
->transaction
= root
->fs_info
->running_transaction
;
170 h
->blocks_reserved
= num_blocks
;
172 h
->block_group
= NULL
;
173 h
->alloc_exclude_nr
= 0;
174 h
->alloc_exclude_start
= 0;
175 root
->fs_info
->running_transaction
->use_count
++;
176 mutex_unlock(&root
->fs_info
->trans_mutex
);
/* Start a transaction, waiting for any blocked commit (policy 1). */
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	return start_transaction(root, num_blocks, 1);
}
/* Join the running transaction without waiting (policy 0). */
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, num_blocks, 0);
}
/* Start a user-ioctl transaction, always waiting for commits (policy 2). */
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							 int num_blocks)
{
	return start_transaction(r, num_blocks, 2);
}
198 static noinline
int wait_for_commit(struct btrfs_root
*root
,
199 struct btrfs_transaction
*commit
)
202 mutex_lock(&root
->fs_info
->trans_mutex
);
203 while(!commit
->commit_done
) {
204 prepare_to_wait(&commit
->commit_wait
, &wait
,
205 TASK_UNINTERRUPTIBLE
);
206 if (commit
->commit_done
)
208 mutex_unlock(&root
->fs_info
->trans_mutex
);
210 mutex_lock(&root
->fs_info
->trans_mutex
);
212 mutex_unlock(&root
->fs_info
->trans_mutex
);
213 finish_wait(&commit
->commit_wait
, &wait
);
217 static void throttle_on_drops(struct btrfs_root
*root
)
219 struct btrfs_fs_info
*info
= root
->fs_info
;
220 int harder_count
= 0;
223 if (atomic_read(&info
->throttles
)) {
226 thr
= atomic_read(&info
->throttle_gen
);
229 prepare_to_wait(&info
->transaction_throttle
,
230 &wait
, TASK_UNINTERRUPTIBLE
);
231 if (!atomic_read(&info
->throttles
)) {
232 finish_wait(&info
->transaction_throttle
, &wait
);
236 finish_wait(&info
->transaction_throttle
, &wait
);
237 } while (thr
== atomic_read(&info
->throttle_gen
));
240 if (root
->fs_info
->total_ref_cache_size
> 1 * 1024 * 1024 &&
244 if (root
->fs_info
->total_ref_cache_size
> 5 * 1024 * 1024 &&
248 if (root
->fs_info
->total_ref_cache_size
> 10 * 1024 * 1024 &&
254 void btrfs_throttle(struct btrfs_root
*root
)
256 mutex_lock(&root
->fs_info
->trans_mutex
);
257 if (!root
->fs_info
->open_ioctl_trans
)
258 wait_current_trans(root
);
259 mutex_unlock(&root
->fs_info
->trans_mutex
);
261 throttle_on_drops(root
);
264 static int __btrfs_end_transaction(struct btrfs_trans_handle
*trans
,
265 struct btrfs_root
*root
, int throttle
)
267 struct btrfs_transaction
*cur_trans
;
268 struct btrfs_fs_info
*info
= root
->fs_info
;
270 mutex_lock(&info
->trans_mutex
);
271 cur_trans
= info
->running_transaction
;
272 WARN_ON(cur_trans
!= trans
->transaction
);
273 WARN_ON(cur_trans
->num_writers
< 1);
274 cur_trans
->num_writers
--;
276 if (waitqueue_active(&cur_trans
->writer_wait
))
277 wake_up(&cur_trans
->writer_wait
);
278 put_transaction(cur_trans
);
279 mutex_unlock(&info
->trans_mutex
);
280 memset(trans
, 0, sizeof(*trans
));
281 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
284 throttle_on_drops(root
);
/* End a transaction handle without throttling. */
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}
/* End a transaction handle, then throttle against snapshot drops. */
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}
302 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle
*trans
,
303 struct btrfs_root
*root
)
308 struct extent_io_tree
*dirty_pages
;
310 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
315 if (!trans
|| !trans
->transaction
) {
316 return filemap_write_and_wait(btree_inode
->i_mapping
);
318 dirty_pages
= &trans
->transaction
->dirty_pages
;
320 ret
= find_first_extent_bit(dirty_pages
, 0, &start
, &end
,
324 clear_extent_dirty(dirty_pages
, start
, end
, GFP_NOFS
);
325 while(start
<= end
) {
326 index
= start
>> PAGE_CACHE_SHIFT
;
327 start
= (u64
)(index
+ 1) << PAGE_CACHE_SHIFT
;
328 page
= find_lock_page(btree_inode
->i_mapping
, index
);
331 if (PageWriteback(page
)) {
333 wait_on_page_writeback(page
);
336 page_cache_release(page
);
340 err
= write_one_page(page
, 0);
343 page_cache_release(page
);
346 err
= filemap_fdatawait(btree_inode
->i_mapping
);
352 static int update_cowonly_root(struct btrfs_trans_handle
*trans
,
353 struct btrfs_root
*root
)
357 struct btrfs_root
*tree_root
= root
->fs_info
->tree_root
;
359 btrfs_write_dirty_block_groups(trans
, root
);
361 old_root_bytenr
= btrfs_root_bytenr(&root
->root_item
);
362 if (old_root_bytenr
== root
->node
->start
)
364 btrfs_set_root_bytenr(&root
->root_item
,
366 btrfs_set_root_level(&root
->root_item
,
367 btrfs_header_level(root
->node
));
368 ret
= btrfs_update_root(trans
, tree_root
,
372 btrfs_write_dirty_block_groups(trans
, root
);
377 int btrfs_commit_tree_roots(struct btrfs_trans_handle
*trans
,
378 struct btrfs_root
*root
)
380 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
381 struct list_head
*next
;
383 while(!list_empty(&fs_info
->dirty_cowonly_roots
)) {
384 next
= fs_info
->dirty_cowonly_roots
.next
;
386 root
= list_entry(next
, struct btrfs_root
, dirty_list
);
387 update_cowonly_root(trans
, root
);
392 int btrfs_add_dead_root(struct btrfs_root
*root
, struct btrfs_root
*latest
)
394 struct btrfs_dirty_root
*dirty
;
396 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
400 dirty
->latest_root
= latest
;
402 mutex_lock(&root
->fs_info
->trans_mutex
);
403 list_add(&dirty
->list
, &latest
->fs_info
->dead_roots
);
404 mutex_unlock(&root
->fs_info
->trans_mutex
);
408 static noinline
int add_dirty_roots(struct btrfs_trans_handle
*trans
,
409 struct radix_tree_root
*radix
,
410 struct list_head
*list
)
412 struct btrfs_dirty_root
*dirty
;
413 struct btrfs_root
*gang
[8];
414 struct btrfs_root
*root
;
421 ret
= radix_tree_gang_lookup_tag(radix
, (void **)gang
, 0,
423 BTRFS_ROOT_TRANS_TAG
);
426 for (i
= 0; i
< ret
; i
++) {
428 radix_tree_tag_clear(radix
,
429 (unsigned long)root
->root_key
.objectid
,
430 BTRFS_ROOT_TRANS_TAG
);
432 BUG_ON(!root
->ref_tree
);
433 dirty
= root
->dirty_root
;
435 if (root
->commit_root
== root
->node
) {
436 WARN_ON(root
->node
->start
!=
437 btrfs_root_bytenr(&root
->root_item
));
439 free_extent_buffer(root
->commit_root
);
440 root
->commit_root
= NULL
;
441 root
->dirty_root
= NULL
;
443 spin_lock(&root
->list_lock
);
444 list_del_init(&dirty
->root
->dead_list
);
445 spin_unlock(&root
->list_lock
);
450 /* make sure to update the root on disk
451 * so we get any updates to the block used
454 err
= btrfs_update_root(trans
,
455 root
->fs_info
->tree_root
,
461 memset(&root
->root_item
.drop_progress
, 0,
462 sizeof(struct btrfs_disk_key
));
463 root
->root_item
.drop_level
= 0;
464 root
->commit_root
= NULL
;
465 root
->dirty_root
= NULL
;
466 root
->root_key
.offset
= root
->fs_info
->generation
;
467 btrfs_set_root_bytenr(&root
->root_item
,
469 btrfs_set_root_level(&root
->root_item
,
470 btrfs_header_level(root
->node
));
471 err
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
,
477 refs
= btrfs_root_refs(&dirty
->root
->root_item
);
478 btrfs_set_root_refs(&dirty
->root
->root_item
, refs
- 1);
479 err
= btrfs_update_root(trans
, root
->fs_info
->tree_root
,
480 &dirty
->root
->root_key
,
481 &dirty
->root
->root_item
);
485 list_add(&dirty
->list
, list
);
488 free_extent_buffer(dirty
->root
->node
);
497 int btrfs_defrag_root(struct btrfs_root
*root
, int cacheonly
)
499 struct btrfs_fs_info
*info
= root
->fs_info
;
501 struct btrfs_trans_handle
*trans
;
505 if (root
->defrag_running
)
507 trans
= btrfs_start_transaction(root
, 1);
509 root
->defrag_running
= 1;
510 ret
= btrfs_defrag_leaves(trans
, root
, cacheonly
);
511 nr
= trans
->blocks_used
;
512 btrfs_end_transaction(trans
, root
);
513 btrfs_btree_balance_dirty(info
->tree_root
, nr
);
516 trans
= btrfs_start_transaction(root
, 1);
517 if (root
->fs_info
->closing
|| ret
!= -EAGAIN
)
520 root
->defrag_running
= 0;
522 btrfs_end_transaction(trans
, root
);
526 static noinline
int drop_dirty_roots(struct btrfs_root
*tree_root
,
527 struct list_head
*list
)
529 struct btrfs_dirty_root
*dirty
;
530 struct btrfs_trans_handle
*trans
;
538 while(!list_empty(list
)) {
539 struct btrfs_root
*root
;
541 dirty
= list_entry(list
->prev
, struct btrfs_dirty_root
, list
);
542 list_del_init(&dirty
->list
);
544 num_bytes
= btrfs_root_used(&dirty
->root
->root_item
);
545 root
= dirty
->latest_root
;
546 atomic_inc(&root
->fs_info
->throttles
);
548 mutex_lock(&root
->fs_info
->drop_mutex
);
550 trans
= btrfs_start_transaction(tree_root
, 1);
551 ret
= btrfs_drop_snapshot(trans
, dirty
->root
);
552 if (ret
!= -EAGAIN
) {
556 err
= btrfs_update_root(trans
,
558 &dirty
->root
->root_key
,
559 &dirty
->root
->root_item
);
562 nr
= trans
->blocks_used
;
563 ret
= btrfs_end_transaction(trans
, tree_root
);
566 mutex_unlock(&root
->fs_info
->drop_mutex
);
567 btrfs_btree_balance_dirty(tree_root
, nr
);
569 mutex_lock(&root
->fs_info
->drop_mutex
);
572 atomic_dec(&root
->fs_info
->throttles
);
573 wake_up(&root
->fs_info
->transaction_throttle
);
575 mutex_lock(&root
->fs_info
->alloc_mutex
);
576 num_bytes
-= btrfs_root_used(&dirty
->root
->root_item
);
577 bytes_used
= btrfs_root_used(&root
->root_item
);
579 record_root_in_trans(root
);
580 btrfs_set_root_used(&root
->root_item
,
581 bytes_used
- num_bytes
);
583 mutex_unlock(&root
->fs_info
->alloc_mutex
);
585 ret
= btrfs_del_root(trans
, tree_root
, &dirty
->root
->root_key
);
590 mutex_unlock(&root
->fs_info
->drop_mutex
);
592 spin_lock(&root
->list_lock
);
593 list_del_init(&dirty
->root
->dead_list
);
594 if (!list_empty(&root
->dead_list
)) {
595 struct btrfs_root
*oldest
;
596 oldest
= list_entry(root
->dead_list
.prev
,
597 struct btrfs_root
, dead_list
);
598 max_useless
= oldest
->root_key
.offset
- 1;
600 max_useless
= root
->root_key
.offset
- 1;
602 spin_unlock(&root
->list_lock
);
604 nr
= trans
->blocks_used
;
605 ret
= btrfs_end_transaction(trans
, tree_root
);
608 ret
= btrfs_remove_leaf_refs(root
, max_useless
);
611 free_extent_buffer(dirty
->root
->node
);
615 btrfs_btree_balance_dirty(tree_root
, nr
);
621 static noinline
int create_pending_snapshot(struct btrfs_trans_handle
*trans
,
622 struct btrfs_fs_info
*fs_info
,
623 struct btrfs_pending_snapshot
*pending
)
625 struct btrfs_key key
;
626 struct btrfs_root_item
*new_root_item
;
627 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
628 struct btrfs_root
*root
= pending
->root
;
629 struct extent_buffer
*tmp
;
630 struct extent_buffer
*old
;
635 new_root_item
= kmalloc(sizeof(*new_root_item
), GFP_NOFS
);
636 if (!new_root_item
) {
640 ret
= btrfs_find_free_objectid(trans
, tree_root
, 0, &objectid
);
644 memcpy(new_root_item
, &root
->root_item
, sizeof(*new_root_item
));
646 key
.objectid
= objectid
;
648 btrfs_set_key_type(&key
, BTRFS_ROOT_ITEM_KEY
);
650 old
= btrfs_lock_root_node(root
);
651 btrfs_cow_block(trans
, root
, old
, NULL
, 0, &old
, 0);
653 btrfs_copy_root(trans
, root
, old
, &tmp
, objectid
);
654 btrfs_tree_unlock(old
);
655 free_extent_buffer(old
);
657 btrfs_set_root_bytenr(new_root_item
, tmp
->start
);
658 btrfs_set_root_level(new_root_item
, btrfs_header_level(tmp
));
659 ret
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
, &key
,
661 btrfs_tree_unlock(tmp
);
662 free_extent_buffer(tmp
);
667 * insert the directory item
669 key
.offset
= (u64
)-1;
670 namelen
= strlen(pending
->name
);
671 ret
= btrfs_insert_dir_item(trans
, root
->fs_info
->tree_root
,
672 pending
->name
, namelen
,
673 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
,
674 &key
, BTRFS_FT_DIR
, 0);
679 ret
= btrfs_insert_inode_ref(trans
, root
->fs_info
->tree_root
,
680 pending
->name
, strlen(pending
->name
), objectid
,
681 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
, 0);
683 /* Invalidate existing dcache entry for new snapshot. */
684 btrfs_invalidate_dcache_root(root
, pending
->name
, namelen
);
687 kfree(new_root_item
);
691 static noinline
int create_pending_snapshots(struct btrfs_trans_handle
*trans
,
692 struct btrfs_fs_info
*fs_info
)
694 struct btrfs_pending_snapshot
*pending
;
695 struct list_head
*head
= &trans
->transaction
->pending_snapshots
;
698 while(!list_empty(head
)) {
699 pending
= list_entry(head
->next
,
700 struct btrfs_pending_snapshot
, list
);
701 ret
= create_pending_snapshot(trans
, fs_info
, pending
);
703 list_del(&pending
->list
);
704 kfree(pending
->name
);
710 int btrfs_commit_transaction(struct btrfs_trans_handle
*trans
,
711 struct btrfs_root
*root
)
713 unsigned long joined
= 0;
714 unsigned long timeout
= 1;
715 struct btrfs_transaction
*cur_trans
;
716 struct btrfs_transaction
*prev_trans
= NULL
;
717 struct btrfs_root
*chunk_root
= root
->fs_info
->chunk_root
;
718 struct list_head dirty_fs_roots
;
719 struct extent_io_tree
*pinned_copy
;
723 INIT_LIST_HEAD(&dirty_fs_roots
);
725 mutex_lock(&root
->fs_info
->trans_mutex
);
726 if (trans
->transaction
->in_commit
) {
727 cur_trans
= trans
->transaction
;
728 trans
->transaction
->use_count
++;
729 mutex_unlock(&root
->fs_info
->trans_mutex
);
730 btrfs_end_transaction(trans
, root
);
732 ret
= wait_for_commit(root
, cur_trans
);
735 mutex_lock(&root
->fs_info
->trans_mutex
);
736 put_transaction(cur_trans
);
737 mutex_unlock(&root
->fs_info
->trans_mutex
);
742 pinned_copy
= kmalloc(sizeof(*pinned_copy
), GFP_NOFS
);
746 extent_io_tree_init(pinned_copy
,
747 root
->fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
749 trans
->transaction
->in_commit
= 1;
750 trans
->transaction
->blocked
= 1;
751 cur_trans
= trans
->transaction
;
752 if (cur_trans
->list
.prev
!= &root
->fs_info
->trans_list
) {
753 prev_trans
= list_entry(cur_trans
->list
.prev
,
754 struct btrfs_transaction
, list
);
755 if (!prev_trans
->commit_done
) {
756 prev_trans
->use_count
++;
757 mutex_unlock(&root
->fs_info
->trans_mutex
);
759 wait_for_commit(root
, prev_trans
);
761 mutex_lock(&root
->fs_info
->trans_mutex
);
762 put_transaction(prev_trans
);
767 int snap_pending
= 0;
768 joined
= cur_trans
->num_joined
;
769 if (!list_empty(&trans
->transaction
->pending_snapshots
))
772 WARN_ON(cur_trans
!= trans
->transaction
);
773 prepare_to_wait(&cur_trans
->writer_wait
, &wait
,
774 TASK_UNINTERRUPTIBLE
);
776 if (cur_trans
->num_writers
> 1)
777 timeout
= MAX_SCHEDULE_TIMEOUT
;
781 mutex_unlock(&root
->fs_info
->trans_mutex
);
784 ret
= btrfs_wait_ordered_extents(root
, 1);
788 schedule_timeout(timeout
);
790 mutex_lock(&root
->fs_info
->trans_mutex
);
791 finish_wait(&cur_trans
->writer_wait
, &wait
);
792 } while (cur_trans
->num_writers
> 1 ||
793 (cur_trans
->num_joined
!= joined
));
795 ret
= create_pending_snapshots(trans
, root
->fs_info
);
798 WARN_ON(cur_trans
!= trans
->transaction
);
800 ret
= add_dirty_roots(trans
, &root
->fs_info
->fs_roots_radix
,
804 ret
= btrfs_commit_tree_roots(trans
, root
);
807 cur_trans
= root
->fs_info
->running_transaction
;
808 spin_lock(&root
->fs_info
->new_trans_lock
);
809 root
->fs_info
->running_transaction
= NULL
;
810 spin_unlock(&root
->fs_info
->new_trans_lock
);
811 btrfs_set_super_generation(&root
->fs_info
->super_copy
,
813 btrfs_set_super_root(&root
->fs_info
->super_copy
,
814 root
->fs_info
->tree_root
->node
->start
);
815 btrfs_set_super_root_level(&root
->fs_info
->super_copy
,
816 btrfs_header_level(root
->fs_info
->tree_root
->node
));
818 btrfs_set_super_chunk_root(&root
->fs_info
->super_copy
,
819 chunk_root
->node
->start
);
820 btrfs_set_super_chunk_root_level(&root
->fs_info
->super_copy
,
821 btrfs_header_level(chunk_root
->node
));
822 memcpy(&root
->fs_info
->super_for_commit
, &root
->fs_info
->super_copy
,
823 sizeof(root
->fs_info
->super_copy
));
825 btrfs_copy_pinned(root
, pinned_copy
);
827 trans
->transaction
->blocked
= 0;
828 wake_up(&root
->fs_info
->transaction_throttle
);
829 wake_up(&root
->fs_info
->transaction_wait
);
831 mutex_unlock(&root
->fs_info
->trans_mutex
);
832 ret
= btrfs_write_and_wait_transaction(trans
, root
);
834 write_ctree_super(trans
, root
);
836 btrfs_finish_extent_commit(trans
, root
, pinned_copy
);
837 mutex_lock(&root
->fs_info
->trans_mutex
);
841 cur_trans
->commit_done
= 1;
842 root
->fs_info
->last_trans_committed
= cur_trans
->transid
;
843 wake_up(&cur_trans
->commit_wait
);
844 put_transaction(cur_trans
);
845 put_transaction(cur_trans
);
847 list_splice_init(&dirty_fs_roots
, &root
->fs_info
->dead_roots
);
848 if (root
->fs_info
->closing
)
849 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_fs_roots
);
851 mutex_unlock(&root
->fs_info
->trans_mutex
);
852 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
854 if (root
->fs_info
->closing
) {
855 drop_dirty_roots(root
->fs_info
->tree_root
, &dirty_fs_roots
);
860 int btrfs_clean_old_snapshots(struct btrfs_root
*root
)
862 struct list_head dirty_roots
;
863 INIT_LIST_HEAD(&dirty_roots
);
865 mutex_lock(&root
->fs_info
->trans_mutex
);
866 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_roots
);
867 mutex_unlock(&root
->fs_info
->trans_mutex
);
869 if (!list_empty(&dirty_roots
)) {
870 drop_dirty_roots(root
, &dirty_roots
);