2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <asm/unaligned.h>
37 #include "transaction.h"
38 #include "btrfs_inode.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
47 static struct extent_io_ops btree_extent_io_ops
;
48 static void end_workqueue_fn(struct btrfs_work
*work
);
49 static void free_fs_root(struct btrfs_root
*root
);
50 static void btrfs_check_super_valid(struct btrfs_fs_info
*fs_info
,
52 static int btrfs_destroy_ordered_operations(struct btrfs_root
*root
);
53 static int btrfs_destroy_ordered_extents(struct btrfs_root
*root
);
54 static int btrfs_destroy_delayed_refs(struct btrfs_transaction
*trans
,
55 struct btrfs_root
*root
);
56 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction
*t
);
57 static int btrfs_destroy_delalloc_inodes(struct btrfs_root
*root
);
58 static int btrfs_destroy_marked_extents(struct btrfs_root
*root
,
59 struct extent_io_tree
*dirty_pages
,
61 static int btrfs_destroy_pinned_extent(struct btrfs_root
*root
,
62 struct extent_io_tree
*pinned_extents
);
63 static int btrfs_cleanup_transaction(struct btrfs_root
*root
);
66 * end_io_wq structs are used to do processing in task context when an IO is
67 * complete. This is used during reads to verify checksums, and it is used
68 * by writes to insert metadata for new file extents after IO is complete.
74 struct btrfs_fs_info
*info
;
77 struct list_head list
;
78 struct btrfs_work work
;
82 * async submit bios are used to offload expensive checksumming
83 * onto the worker threads. They checksum file and metadata bios
84 * just before they are sent down the IO stack.
86 struct async_submit_bio
{
89 struct list_head list
;
90 extent_submit_bio_hook_t
*submit_bio_start
;
91 extent_submit_bio_hook_t
*submit_bio_done
;
94 unsigned long bio_flags
;
96 * bio_offset is optional, can be used if the pages in the bio
97 * can't tell us where in the file the bio should go
100 struct btrfs_work work
;
104 * Lockdep class keys for extent_buffer->lock's in this root. For a given
105 * eb, the lockdep key is determined by the btrfs_root it belongs to and
106 * the level the eb occupies in the tree.
108 * Different roots are used for different purposes and may nest inside each
109 * other and they require separate keysets. As lockdep keys should be
110 * static, assign keysets according to the purpose of the root as indicated
111 * by btrfs_root->objectid. This ensures that all special purpose roots
112 * have separate keysets.
114 * Lock-nesting across peer nodes is always done with the immediate parent
115 * node locked thus preventing deadlock. As lockdep doesn't know this, use
116 * subclass to avoid triggering lockdep warning in such cases.
118 * The key is set by the readpage_end_io_hook after the buffer has passed
119 * csum validation but before the pages are unlocked. It is also set by
120 * btrfs_init_new_buffer on freshly allocated blocks.
122 * We also add a check to make sure the highest level of the tree is the
123 * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
124 * needs update as well.
126 #ifdef CONFIG_DEBUG_LOCK_ALLOC
127 # if BTRFS_MAX_LEVEL != 8
131 static struct btrfs_lockdep_keyset
{
132 u64 id
; /* root objectid */
133 const char *name_stem
; /* lock name stem */
134 char names
[BTRFS_MAX_LEVEL
+ 1][20];
135 struct lock_class_key keys
[BTRFS_MAX_LEVEL
+ 1];
136 } btrfs_lockdep_keysets
[] = {
137 { .id
= BTRFS_ROOT_TREE_OBJECTID
, .name_stem
= "root" },
138 { .id
= BTRFS_EXTENT_TREE_OBJECTID
, .name_stem
= "extent" },
139 { .id
= BTRFS_CHUNK_TREE_OBJECTID
, .name_stem
= "chunk" },
140 { .id
= BTRFS_DEV_TREE_OBJECTID
, .name_stem
= "dev" },
141 { .id
= BTRFS_FS_TREE_OBJECTID
, .name_stem
= "fs" },
142 { .id
= BTRFS_CSUM_TREE_OBJECTID
, .name_stem
= "csum" },
143 { .id
= BTRFS_ORPHAN_OBJECTID
, .name_stem
= "orphan" },
144 { .id
= BTRFS_TREE_LOG_OBJECTID
, .name_stem
= "log" },
145 { .id
= BTRFS_TREE_RELOC_OBJECTID
, .name_stem
= "treloc" },
146 { .id
= BTRFS_DATA_RELOC_TREE_OBJECTID
, .name_stem
= "dreloc" },
147 { .id
= 0, .name_stem
= "tree" },
150 void __init
btrfs_init_lockdep(void)
154 /* initialize lockdep class names */
155 for (i
= 0; i
< ARRAY_SIZE(btrfs_lockdep_keysets
); i
++) {
156 struct btrfs_lockdep_keyset
*ks
= &btrfs_lockdep_keysets
[i
];
158 for (j
= 0; j
< ARRAY_SIZE(ks
->names
); j
++)
159 snprintf(ks
->names
[j
], sizeof(ks
->names
[j
]),
160 "btrfs-%s-%02d", ks
->name_stem
, j
);
164 void btrfs_set_buffer_lockdep_class(u64 objectid
, struct extent_buffer
*eb
,
167 struct btrfs_lockdep_keyset
*ks
;
169 BUG_ON(level
>= ARRAY_SIZE(ks
->keys
));
171 /* find the matching keyset, id 0 is the default entry */
172 for (ks
= btrfs_lockdep_keysets
; ks
->id
; ks
++)
173 if (ks
->id
== objectid
)
176 lockdep_set_class_and_name(&eb
->lock
,
177 &ks
->keys
[level
], ks
->names
[level
]);
183 * extents on the btree inode are pretty simple, there's one extent
184 * that covers the entire device
186 static struct extent_map
*btree_get_extent(struct inode
*inode
,
187 struct page
*page
, size_t pg_offset
, u64 start
, u64 len
,
190 struct extent_map_tree
*em_tree
= &BTRFS_I(inode
)->extent_tree
;
191 struct extent_map
*em
;
194 read_lock(&em_tree
->lock
);
195 em
= lookup_extent_mapping(em_tree
, start
, len
);
198 BTRFS_I(inode
)->root
->fs_info
->fs_devices
->latest_bdev
;
199 read_unlock(&em_tree
->lock
);
202 read_unlock(&em_tree
->lock
);
204 em
= alloc_extent_map();
206 em
= ERR_PTR(-ENOMEM
);
211 em
->block_len
= (u64
)-1;
213 em
->bdev
= BTRFS_I(inode
)->root
->fs_info
->fs_devices
->latest_bdev
;
215 write_lock(&em_tree
->lock
);
216 ret
= add_extent_mapping(em_tree
, em
);
217 if (ret
== -EEXIST
) {
218 u64 failed_start
= em
->start
;
219 u64 failed_len
= em
->len
;
222 em
= lookup_extent_mapping(em_tree
, start
, len
);
226 em
= lookup_extent_mapping(em_tree
, failed_start
,
234 write_unlock(&em_tree
->lock
);
242 u32
btrfs_csum_data(struct btrfs_root
*root
, char *data
, u32 seed
, size_t len
)
244 return crc32c(seed
, data
, len
);
247 void btrfs_csum_final(u32 crc
, char *result
)
249 put_unaligned_le32(~crc
, result
);
253 * compute the csum for a btree block, and either verify it or write it
254 * into the csum field of the block.
256 static int csum_tree_block(struct btrfs_root
*root
, struct extent_buffer
*buf
,
259 u16 csum_size
= btrfs_super_csum_size(root
->fs_info
->super_copy
);
262 unsigned long cur_len
;
263 unsigned long offset
= BTRFS_CSUM_SIZE
;
265 unsigned long map_start
;
266 unsigned long map_len
;
269 unsigned long inline_result
;
271 len
= buf
->len
- offset
;
273 err
= map_private_extent_buffer(buf
, offset
, 32,
274 &kaddr
, &map_start
, &map_len
);
277 cur_len
= min(len
, map_len
- (offset
- map_start
));
278 crc
= btrfs_csum_data(root
, kaddr
+ offset
- map_start
,
283 if (csum_size
> sizeof(inline_result
)) {
284 result
= kzalloc(csum_size
* sizeof(char), GFP_NOFS
);
288 result
= (char *)&inline_result
;
291 btrfs_csum_final(crc
, result
);
294 if (memcmp_extent_buffer(buf
, result
, 0, csum_size
)) {
297 memcpy(&found
, result
, csum_size
);
299 read_extent_buffer(buf
, &val
, 0, csum_size
);
300 printk_ratelimited(KERN_INFO
"btrfs: %s checksum verify "
301 "failed on %llu wanted %X found %X "
303 root
->fs_info
->sb
->s_id
,
304 (unsigned long long)buf
->start
, val
, found
,
305 btrfs_header_level(buf
));
306 if (result
!= (char *)&inline_result
)
311 write_extent_buffer(buf
, result
, 0, csum_size
);
313 if (result
!= (char *)&inline_result
)
319 * we can't consider a given block up to date unless the transid of the
320 * block matches the transid in the parent node's pointer. This is how we
321 * detect blocks that either didn't get written at all or got written
322 * in the wrong place.
324 static int verify_parent_transid(struct extent_io_tree
*io_tree
,
325 struct extent_buffer
*eb
, u64 parent_transid
)
327 struct extent_state
*cached_state
= NULL
;
330 if (!parent_transid
|| btrfs_header_generation(eb
) == parent_transid
)
333 lock_extent_bits(io_tree
, eb
->start
, eb
->start
+ eb
->len
- 1,
334 0, &cached_state
, GFP_NOFS
);
335 if (extent_buffer_uptodate(io_tree
, eb
, cached_state
) &&
336 btrfs_header_generation(eb
) == parent_transid
) {
340 printk_ratelimited("parent transid verify failed on %llu wanted %llu "
342 (unsigned long long)eb
->start
,
343 (unsigned long long)parent_transid
,
344 (unsigned long long)btrfs_header_generation(eb
));
346 clear_extent_buffer_uptodate(io_tree
, eb
, &cached_state
);
348 unlock_extent_cached(io_tree
, eb
->start
, eb
->start
+ eb
->len
- 1,
349 &cached_state
, GFP_NOFS
);
354 * helper to read a given tree block, doing retries as required when
355 * the checksums don't match and we have alternate mirrors to try.
357 static int btree_read_extent_buffer_pages(struct btrfs_root
*root
,
358 struct extent_buffer
*eb
,
359 u64 start
, u64 parent_transid
)
361 struct extent_io_tree
*io_tree
;
366 clear_bit(EXTENT_BUFFER_CORRUPT
, &eb
->bflags
);
367 io_tree
= &BTRFS_I(root
->fs_info
->btree_inode
)->io_tree
;
369 ret
= read_extent_buffer_pages(io_tree
, eb
, start
, 1,
370 btree_get_extent
, mirror_num
);
372 !verify_parent_transid(io_tree
, eb
, parent_transid
))
376 * This buffer's crc is fine, but its contents are corrupted, so
377 * there is no reason to read the other copies, they won't be
380 if (test_bit(EXTENT_BUFFER_CORRUPT
, &eb
->bflags
))
383 num_copies
= btrfs_num_copies(&root
->fs_info
->mapping_tree
,
389 if (mirror_num
> num_copies
)
396 * checksum a dirty tree block before IO. This has extra checks to make sure
397 * we only fill in the checksum field in the first page of a multi-page block
400 static int csum_dirty_buffer(struct btrfs_root
*root
, struct page
*page
)
402 struct extent_io_tree
*tree
;
403 u64 start
= (u64
)page
->index
<< PAGE_CACHE_SHIFT
;
406 struct extent_buffer
*eb
;
409 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
411 if (page
->private == EXTENT_PAGE_PRIVATE
) {
415 if (!page
->private) {
419 len
= page
->private >> 2;
422 eb
= alloc_extent_buffer(tree
, start
, len
, page
);
427 ret
= btree_read_extent_buffer_pages(root
, eb
, start
+ PAGE_CACHE_SIZE
,
428 btrfs_header_generation(eb
));
430 WARN_ON(!btrfs_header_flag(eb
, BTRFS_HEADER_FLAG_WRITTEN
));
432 found_start
= btrfs_header_bytenr(eb
);
433 if (found_start
!= start
) {
437 if (eb
->first_page
!= page
) {
441 if (!PageUptodate(page
)) {
445 csum_tree_block(root
, eb
, 0);
447 free_extent_buffer(eb
);
452 static int check_tree_block_fsid(struct btrfs_root
*root
,
453 struct extent_buffer
*eb
)
455 struct btrfs_fs_devices
*fs_devices
= root
->fs_info
->fs_devices
;
456 u8 fsid
[BTRFS_UUID_SIZE
];
459 read_extent_buffer(eb
, fsid
, (unsigned long)btrfs_header_fsid(eb
),
462 if (!memcmp(fsid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
)) {
466 fs_devices
= fs_devices
->seed
;
471 #define CORRUPT(reason, eb, root, slot) \
472 printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
473 "root=%llu, slot=%d\n", reason, \
474 (unsigned long long)btrfs_header_bytenr(eb), \
475 (unsigned long long)root->objectid, slot)
477 static noinline
int check_leaf(struct btrfs_root
*root
,
478 struct extent_buffer
*leaf
)
480 struct btrfs_key key
;
481 struct btrfs_key leaf_key
;
482 u32 nritems
= btrfs_header_nritems(leaf
);
488 /* Check the 0 item */
489 if (btrfs_item_offset_nr(leaf
, 0) + btrfs_item_size_nr(leaf
, 0) !=
490 BTRFS_LEAF_DATA_SIZE(root
)) {
491 CORRUPT("invalid item offset size pair", leaf
, root
, 0);
496 * Check to make sure each items keys are in the correct order and their
497 * offsets make sense. We only have to loop through nritems-1 because
498 * we check the current slot against the next slot, which verifies the
499 * next slot's offset+size makes sense and that the current's slot
502 for (slot
= 0; slot
< nritems
- 1; slot
++) {
503 btrfs_item_key_to_cpu(leaf
, &leaf_key
, slot
);
504 btrfs_item_key_to_cpu(leaf
, &key
, slot
+ 1);
506 /* Make sure the keys are in the right order */
507 if (btrfs_comp_cpu_keys(&leaf_key
, &key
) >= 0) {
508 CORRUPT("bad key order", leaf
, root
, slot
);
513 * Make sure the offset and ends are right, remember that the
514 * item data starts at the end of the leaf and grows towards the
517 if (btrfs_item_offset_nr(leaf
, slot
) !=
518 btrfs_item_end_nr(leaf
, slot
+ 1)) {
519 CORRUPT("slot offset bad", leaf
, root
, slot
);
524 * Check to make sure that we don't point outside of the leaf,
525 * just incase all the items are consistent to eachother, but
526 * all point outside of the leaf.
528 if (btrfs_item_end_nr(leaf
, slot
) >
529 BTRFS_LEAF_DATA_SIZE(root
)) {
530 CORRUPT("slot end outside of leaf", leaf
, root
, slot
);
538 static int btree_readpage_end_io_hook(struct page
*page
, u64 start
, u64 end
,
539 struct extent_state
*state
)
541 struct extent_io_tree
*tree
;
545 struct extent_buffer
*eb
;
546 struct btrfs_root
*root
= BTRFS_I(page
->mapping
->host
)->root
;
549 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
550 if (page
->private == EXTENT_PAGE_PRIVATE
)
555 len
= page
->private >> 2;
558 eb
= alloc_extent_buffer(tree
, start
, len
, page
);
564 found_start
= btrfs_header_bytenr(eb
);
565 if (found_start
!= start
) {
566 printk_ratelimited(KERN_INFO
"btrfs bad tree block start "
568 (unsigned long long)found_start
,
569 (unsigned long long)eb
->start
);
573 if (eb
->first_page
!= page
) {
574 printk(KERN_INFO
"btrfs bad first page %lu %lu\n",
575 eb
->first_page
->index
, page
->index
);
580 if (check_tree_block_fsid(root
, eb
)) {
581 printk_ratelimited(KERN_INFO
"btrfs bad fsid on block %llu\n",
582 (unsigned long long)eb
->start
);
586 found_level
= btrfs_header_level(eb
);
588 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb
),
591 ret
= csum_tree_block(root
, eb
, 1);
598 * If this is a leaf block and it is corrupt, set the corrupt bit so
599 * that we don't try and read the other copies of this block, just
602 if (found_level
== 0 && check_leaf(root
, eb
)) {
603 set_bit(EXTENT_BUFFER_CORRUPT
, &eb
->bflags
);
607 end
= min_t(u64
, eb
->len
, PAGE_CACHE_SIZE
);
608 end
= eb
->start
+ end
- 1;
610 free_extent_buffer(eb
);
615 static void end_workqueue_bio(struct bio
*bio
, int err
)
617 struct end_io_wq
*end_io_wq
= bio
->bi_private
;
618 struct btrfs_fs_info
*fs_info
;
620 fs_info
= end_io_wq
->info
;
621 end_io_wq
->error
= err
;
622 end_io_wq
->work
.func
= end_workqueue_fn
;
623 end_io_wq
->work
.flags
= 0;
625 if (bio
->bi_rw
& REQ_WRITE
) {
626 if (end_io_wq
->metadata
== 1)
627 btrfs_queue_worker(&fs_info
->endio_meta_write_workers
,
629 else if (end_io_wq
->metadata
== 2)
630 btrfs_queue_worker(&fs_info
->endio_freespace_worker
,
633 btrfs_queue_worker(&fs_info
->endio_write_workers
,
636 if (end_io_wq
->metadata
)
637 btrfs_queue_worker(&fs_info
->endio_meta_workers
,
640 btrfs_queue_worker(&fs_info
->endio_workers
,
646 * For the metadata arg you want
649 * 1 - if normal metadta
650 * 2 - if writing to the free space cache area
652 int btrfs_bio_wq_end_io(struct btrfs_fs_info
*info
, struct bio
*bio
,
655 struct end_io_wq
*end_io_wq
;
656 end_io_wq
= kmalloc(sizeof(*end_io_wq
), GFP_NOFS
);
660 end_io_wq
->private = bio
->bi_private
;
661 end_io_wq
->end_io
= bio
->bi_end_io
;
662 end_io_wq
->info
= info
;
663 end_io_wq
->error
= 0;
664 end_io_wq
->bio
= bio
;
665 end_io_wq
->metadata
= metadata
;
667 bio
->bi_private
= end_io_wq
;
668 bio
->bi_end_io
= end_workqueue_bio
;
672 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info
*info
)
674 unsigned long limit
= min_t(unsigned long,
675 info
->workers
.max_workers
,
676 info
->fs_devices
->open_devices
);
680 static void run_one_async_start(struct btrfs_work
*work
)
682 struct async_submit_bio
*async
;
684 async
= container_of(work
, struct async_submit_bio
, work
);
685 async
->submit_bio_start(async
->inode
, async
->rw
, async
->bio
,
686 async
->mirror_num
, async
->bio_flags
,
690 static void run_one_async_done(struct btrfs_work
*work
)
692 struct btrfs_fs_info
*fs_info
;
693 struct async_submit_bio
*async
;
696 async
= container_of(work
, struct async_submit_bio
, work
);
697 fs_info
= BTRFS_I(async
->inode
)->root
->fs_info
;
699 limit
= btrfs_async_submit_limit(fs_info
);
700 limit
= limit
* 2 / 3;
702 atomic_dec(&fs_info
->nr_async_submits
);
704 if (atomic_read(&fs_info
->nr_async_submits
) < limit
&&
705 waitqueue_active(&fs_info
->async_submit_wait
))
706 wake_up(&fs_info
->async_submit_wait
);
708 async
->submit_bio_done(async
->inode
, async
->rw
, async
->bio
,
709 async
->mirror_num
, async
->bio_flags
,
713 static void run_one_async_free(struct btrfs_work
*work
)
715 struct async_submit_bio
*async
;
717 async
= container_of(work
, struct async_submit_bio
, work
);
721 int btrfs_wq_submit_bio(struct btrfs_fs_info
*fs_info
, struct inode
*inode
,
722 int rw
, struct bio
*bio
, int mirror_num
,
723 unsigned long bio_flags
,
725 extent_submit_bio_hook_t
*submit_bio_start
,
726 extent_submit_bio_hook_t
*submit_bio_done
)
728 struct async_submit_bio
*async
;
730 async
= kmalloc(sizeof(*async
), GFP_NOFS
);
734 async
->inode
= inode
;
737 async
->mirror_num
= mirror_num
;
738 async
->submit_bio_start
= submit_bio_start
;
739 async
->submit_bio_done
= submit_bio_done
;
741 async
->work
.func
= run_one_async_start
;
742 async
->work
.ordered_func
= run_one_async_done
;
743 async
->work
.ordered_free
= run_one_async_free
;
745 async
->work
.flags
= 0;
746 async
->bio_flags
= bio_flags
;
747 async
->bio_offset
= bio_offset
;
749 atomic_inc(&fs_info
->nr_async_submits
);
752 btrfs_set_work_high_prio(&async
->work
);
754 btrfs_queue_worker(&fs_info
->workers
, &async
->work
);
756 while (atomic_read(&fs_info
->async_submit_draining
) &&
757 atomic_read(&fs_info
->nr_async_submits
)) {
758 wait_event(fs_info
->async_submit_wait
,
759 (atomic_read(&fs_info
->nr_async_submits
) == 0));
765 static int btree_csum_one_bio(struct bio
*bio
)
767 struct bio_vec
*bvec
= bio
->bi_io_vec
;
769 struct btrfs_root
*root
;
771 WARN_ON(bio
->bi_vcnt
<= 0);
772 while (bio_index
< bio
->bi_vcnt
) {
773 root
= BTRFS_I(bvec
->bv_page
->mapping
->host
)->root
;
774 csum_dirty_buffer(root
, bvec
->bv_page
);
781 static int __btree_submit_bio_start(struct inode
*inode
, int rw
,
782 struct bio
*bio
, int mirror_num
,
783 unsigned long bio_flags
,
787 * when we're called for a write, we're already in the async
788 * submission context. Just jump into btrfs_map_bio
790 btree_csum_one_bio(bio
);
794 static int __btree_submit_bio_done(struct inode
*inode
, int rw
, struct bio
*bio
,
795 int mirror_num
, unsigned long bio_flags
,
799 * when we're called for a write, we're already in the async
800 * submission context. Just jump into btrfs_map_bio
802 return btrfs_map_bio(BTRFS_I(inode
)->root
, rw
, bio
, mirror_num
, 1);
805 static int btree_submit_bio_hook(struct inode
*inode
, int rw
, struct bio
*bio
,
806 int mirror_num
, unsigned long bio_flags
,
811 ret
= btrfs_bio_wq_end_io(BTRFS_I(inode
)->root
->fs_info
,
815 if (!(rw
& REQ_WRITE
)) {
817 * called for a read, do the setup so that checksum validation
818 * can happen in the async kernel threads
820 return btrfs_map_bio(BTRFS_I(inode
)->root
, rw
, bio
,
825 * kthread helpers are used to submit writes so that checksumming
826 * can happen in parallel across all CPUs
828 return btrfs_wq_submit_bio(BTRFS_I(inode
)->root
->fs_info
,
829 inode
, rw
, bio
, mirror_num
, 0,
831 __btree_submit_bio_start
,
832 __btree_submit_bio_done
);
835 #ifdef CONFIG_MIGRATION
836 static int btree_migratepage(struct address_space
*mapping
,
837 struct page
*newpage
, struct page
*page
)
840 * we can't safely write a btree page from here,
841 * we haven't done the locking hook
846 * Buffers may be managed in a filesystem specific way.
847 * We must have no buffers or drop them.
849 if (page_has_private(page
) &&
850 !try_to_release_page(page
, GFP_KERNEL
))
852 return migrate_page(mapping
, newpage
, page
);
856 static int btree_writepage(struct page
*page
, struct writeback_control
*wbc
)
858 struct extent_io_tree
*tree
;
859 struct btrfs_root
*root
= BTRFS_I(page
->mapping
->host
)->root
;
860 struct extent_buffer
*eb
;
863 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
864 if (!(current
->flags
& PF_MEMALLOC
)) {
865 return extent_write_full_page(tree
, page
,
866 btree_get_extent
, wbc
);
869 redirty_page_for_writepage(wbc
, page
);
870 eb
= btrfs_find_tree_block(root
, page_offset(page
), PAGE_CACHE_SIZE
);
873 was_dirty
= test_and_set_bit(EXTENT_BUFFER_DIRTY
, &eb
->bflags
);
875 spin_lock(&root
->fs_info
->delalloc_lock
);
876 root
->fs_info
->dirty_metadata_bytes
+= PAGE_CACHE_SIZE
;
877 spin_unlock(&root
->fs_info
->delalloc_lock
);
879 free_extent_buffer(eb
);
885 static int btree_writepages(struct address_space
*mapping
,
886 struct writeback_control
*wbc
)
888 struct extent_io_tree
*tree
;
889 tree
= &BTRFS_I(mapping
->host
)->io_tree
;
890 if (wbc
->sync_mode
== WB_SYNC_NONE
) {
891 struct btrfs_root
*root
= BTRFS_I(mapping
->host
)->root
;
893 unsigned long thresh
= 32 * 1024 * 1024;
895 if (wbc
->for_kupdate
)
898 /* this is a bit racy, but that's ok */
899 num_dirty
= root
->fs_info
->dirty_metadata_bytes
;
900 if (num_dirty
< thresh
)
903 return extent_writepages(tree
, mapping
, btree_get_extent
, wbc
);
906 static int btree_readpage(struct file
*file
, struct page
*page
)
908 struct extent_io_tree
*tree
;
909 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
910 return extent_read_full_page(tree
, page
, btree_get_extent
);
913 static int btree_releasepage(struct page
*page
, gfp_t gfp_flags
)
915 struct extent_io_tree
*tree
;
916 struct extent_map_tree
*map
;
919 if (PageWriteback(page
) || PageDirty(page
))
922 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
923 map
= &BTRFS_I(page
->mapping
->host
)->extent_tree
;
925 ret
= try_release_extent_state(map
, tree
, page
, gfp_flags
);
929 ret
= try_release_extent_buffer(tree
, page
);
931 ClearPagePrivate(page
);
932 set_page_private(page
, 0);
933 page_cache_release(page
);
939 static void btree_invalidatepage(struct page
*page
, unsigned long offset
)
941 struct extent_io_tree
*tree
;
942 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
943 extent_invalidatepage(tree
, page
, offset
);
944 btree_releasepage(page
, GFP_NOFS
);
945 if (PagePrivate(page
)) {
946 printk(KERN_WARNING
"btrfs warning page private not zero "
947 "on page %llu\n", (unsigned long long)page_offset(page
));
948 ClearPagePrivate(page
);
949 set_page_private(page
, 0);
950 page_cache_release(page
);
954 static const struct address_space_operations btree_aops
= {
955 .readpage
= btree_readpage
,
956 .writepage
= btree_writepage
,
957 .writepages
= btree_writepages
,
958 .releasepage
= btree_releasepage
,
959 .invalidatepage
= btree_invalidatepage
,
960 #ifdef CONFIG_MIGRATION
961 .migratepage
= btree_migratepage
,
965 int readahead_tree_block(struct btrfs_root
*root
, u64 bytenr
, u32 blocksize
,
968 struct extent_buffer
*buf
= NULL
;
969 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
972 buf
= btrfs_find_create_tree_block(root
, bytenr
, blocksize
);
975 read_extent_buffer_pages(&BTRFS_I(btree_inode
)->io_tree
,
976 buf
, 0, 0, btree_get_extent
, 0);
977 free_extent_buffer(buf
);
981 struct extent_buffer
*btrfs_find_tree_block(struct btrfs_root
*root
,
982 u64 bytenr
, u32 blocksize
)
984 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
985 struct extent_buffer
*eb
;
986 eb
= find_extent_buffer(&BTRFS_I(btree_inode
)->io_tree
,
991 struct extent_buffer
*btrfs_find_create_tree_block(struct btrfs_root
*root
,
992 u64 bytenr
, u32 blocksize
)
994 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
995 struct extent_buffer
*eb
;
997 eb
= alloc_extent_buffer(&BTRFS_I(btree_inode
)->io_tree
,
998 bytenr
, blocksize
, NULL
);
1003 int btrfs_write_tree_block(struct extent_buffer
*buf
)
1005 return filemap_fdatawrite_range(buf
->first_page
->mapping
, buf
->start
,
1006 buf
->start
+ buf
->len
- 1);
1009 int btrfs_wait_tree_block_writeback(struct extent_buffer
*buf
)
1011 return filemap_fdatawait_range(buf
->first_page
->mapping
,
1012 buf
->start
, buf
->start
+ buf
->len
- 1);
1015 struct extent_buffer
*read_tree_block(struct btrfs_root
*root
, u64 bytenr
,
1016 u32 blocksize
, u64 parent_transid
)
1018 struct extent_buffer
*buf
= NULL
;
1021 buf
= btrfs_find_create_tree_block(root
, bytenr
, blocksize
);
1025 ret
= btree_read_extent_buffer_pages(root
, buf
, 0, parent_transid
);
1028 set_bit(EXTENT_BUFFER_UPTODATE
, &buf
->bflags
);
1033 int clean_tree_block(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
1034 struct extent_buffer
*buf
)
1036 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
1037 if (btrfs_header_generation(buf
) ==
1038 root
->fs_info
->running_transaction
->transid
) {
1039 btrfs_assert_tree_locked(buf
);
1041 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY
, &buf
->bflags
)) {
1042 spin_lock(&root
->fs_info
->delalloc_lock
);
1043 if (root
->fs_info
->dirty_metadata_bytes
>= buf
->len
)
1044 root
->fs_info
->dirty_metadata_bytes
-= buf
->len
;
1047 spin_unlock(&root
->fs_info
->delalloc_lock
);
1050 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1051 btrfs_set_lock_blocking(buf
);
1052 clear_extent_buffer_dirty(&BTRFS_I(btree_inode
)->io_tree
,
1058 static int __setup_root(u32 nodesize
, u32 leafsize
, u32 sectorsize
,
1059 u32 stripesize
, struct btrfs_root
*root
,
1060 struct btrfs_fs_info
*fs_info
,
1064 root
->commit_root
= NULL
;
1065 root
->sectorsize
= sectorsize
;
1066 root
->nodesize
= nodesize
;
1067 root
->leafsize
= leafsize
;
1068 root
->stripesize
= stripesize
;
1070 root
->track_dirty
= 0;
1072 root
->orphan_item_inserted
= 0;
1073 root
->orphan_cleanup_state
= 0;
1075 root
->fs_info
= fs_info
;
1076 root
->objectid
= objectid
;
1077 root
->last_trans
= 0;
1078 root
->highest_objectid
= 0;
1080 root
->inode_tree
= RB_ROOT
;
1081 INIT_RADIX_TREE(&root
->delayed_nodes_tree
, GFP_ATOMIC
);
1082 root
->block_rsv
= NULL
;
1083 root
->orphan_block_rsv
= NULL
;
1085 INIT_LIST_HEAD(&root
->dirty_list
);
1086 INIT_LIST_HEAD(&root
->orphan_list
);
1087 INIT_LIST_HEAD(&root
->root_list
);
1088 spin_lock_init(&root
->orphan_lock
);
1089 spin_lock_init(&root
->inode_lock
);
1090 spin_lock_init(&root
->accounting_lock
);
1091 mutex_init(&root
->objectid_mutex
);
1092 mutex_init(&root
->log_mutex
);
1093 init_waitqueue_head(&root
->log_writer_wait
);
1094 init_waitqueue_head(&root
->log_commit_wait
[0]);
1095 init_waitqueue_head(&root
->log_commit_wait
[1]);
1096 atomic_set(&root
->log_commit
[0], 0);
1097 atomic_set(&root
->log_commit
[1], 0);
1098 atomic_set(&root
->log_writers
, 0);
1099 root
->log_batch
= 0;
1100 root
->log_transid
= 0;
1101 root
->last_log_commit
= 0;
1102 extent_io_tree_init(&root
->dirty_log_pages
,
1103 fs_info
->btree_inode
->i_mapping
);
1105 memset(&root
->root_key
, 0, sizeof(root
->root_key
));
1106 memset(&root
->root_item
, 0, sizeof(root
->root_item
));
1107 memset(&root
->defrag_progress
, 0, sizeof(root
->defrag_progress
));
1108 memset(&root
->root_kobj
, 0, sizeof(root
->root_kobj
));
1109 root
->defrag_trans_start
= fs_info
->generation
;
1110 init_completion(&root
->kobj_unregister
);
1111 root
->defrag_running
= 0;
1112 root
->root_key
.objectid
= objectid
;
1117 static int find_and_setup_root(struct btrfs_root
*tree_root
,
1118 struct btrfs_fs_info
*fs_info
,
1120 struct btrfs_root
*root
)
1126 __setup_root(tree_root
->nodesize
, tree_root
->leafsize
,
1127 tree_root
->sectorsize
, tree_root
->stripesize
,
1128 root
, fs_info
, objectid
);
1129 ret
= btrfs_find_last_root(tree_root
, objectid
,
1130 &root
->root_item
, &root
->root_key
);
1135 generation
= btrfs_root_generation(&root
->root_item
);
1136 blocksize
= btrfs_level_size(root
, btrfs_root_level(&root
->root_item
));
1137 root
->commit_root
= NULL
;
1138 root
->node
= read_tree_block(root
, btrfs_root_bytenr(&root
->root_item
),
1139 blocksize
, generation
);
1140 if (!root
->node
|| !btrfs_buffer_uptodate(root
->node
, generation
)) {
1141 free_extent_buffer(root
->node
);
1145 root
->commit_root
= btrfs_root_node(root
);
1149 static struct btrfs_root
*alloc_log_tree(struct btrfs_trans_handle
*trans
,
1150 struct btrfs_fs_info
*fs_info
)
1152 struct btrfs_root
*root
;
1153 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
1154 struct extent_buffer
*leaf
;
1156 root
= kzalloc(sizeof(*root
), GFP_NOFS
);
1158 return ERR_PTR(-ENOMEM
);
1160 __setup_root(tree_root
->nodesize
, tree_root
->leafsize
,
1161 tree_root
->sectorsize
, tree_root
->stripesize
,
1162 root
, fs_info
, BTRFS_TREE_LOG_OBJECTID
);
1164 root
->root_key
.objectid
= BTRFS_TREE_LOG_OBJECTID
;
1165 root
->root_key
.type
= BTRFS_ROOT_ITEM_KEY
;
1166 root
->root_key
.offset
= BTRFS_TREE_LOG_OBJECTID
;
1168 * log trees do not get reference counted because they go away
1169 * before a real commit is actually done. They do store pointers
1170 * to file data extents, and those reference counts still get
1171 * updated (along with back refs to the log tree).
1175 leaf
= btrfs_alloc_free_block(trans
, root
, root
->leafsize
, 0,
1176 BTRFS_TREE_LOG_OBJECTID
, NULL
, 0, 0, 0);
1179 return ERR_CAST(leaf
);
1182 memset_extent_buffer(leaf
, 0, 0, sizeof(struct btrfs_header
));
1183 btrfs_set_header_bytenr(leaf
, leaf
->start
);
1184 btrfs_set_header_generation(leaf
, trans
->transid
);
1185 btrfs_set_header_backref_rev(leaf
, BTRFS_MIXED_BACKREF_REV
);
1186 btrfs_set_header_owner(leaf
, BTRFS_TREE_LOG_OBJECTID
);
1189 write_extent_buffer(root
->node
, root
->fs_info
->fsid
,
1190 (unsigned long)btrfs_header_fsid(root
->node
),
1192 btrfs_mark_buffer_dirty(root
->node
);
1193 btrfs_tree_unlock(root
->node
);
1197 int btrfs_init_log_root_tree(struct btrfs_trans_handle
*trans
,
1198 struct btrfs_fs_info
*fs_info
)
1200 struct btrfs_root
*log_root
;
1202 log_root
= alloc_log_tree(trans
, fs_info
);
1203 if (IS_ERR(log_root
))
1204 return PTR_ERR(log_root
);
1205 WARN_ON(fs_info
->log_root_tree
);
1206 fs_info
->log_root_tree
= log_root
;
1210 int btrfs_add_log_tree(struct btrfs_trans_handle
*trans
,
1211 struct btrfs_root
*root
)
1213 struct btrfs_root
*log_root
;
1214 struct btrfs_inode_item
*inode_item
;
1216 log_root
= alloc_log_tree(trans
, root
->fs_info
);
1217 if (IS_ERR(log_root
))
1218 return PTR_ERR(log_root
);
1220 log_root
->last_trans
= trans
->transid
;
1221 log_root
->root_key
.offset
= root
->root_key
.objectid
;
1223 inode_item
= &log_root
->root_item
.inode
;
1224 inode_item
->generation
= cpu_to_le64(1);
1225 inode_item
->size
= cpu_to_le64(3);
1226 inode_item
->nlink
= cpu_to_le32(1);
1227 inode_item
->nbytes
= cpu_to_le64(root
->leafsize
);
1228 inode_item
->mode
= cpu_to_le32(S_IFDIR
| 0755);
1230 btrfs_set_root_node(&log_root
->root_item
, log_root
->node
);
1232 WARN_ON(root
->log_root
);
1233 root
->log_root
= log_root
;
1234 root
->log_transid
= 0;
1235 root
->last_log_commit
= 0;
1239 struct btrfs_root
*btrfs_read_fs_root_no_radix(struct btrfs_root
*tree_root
,
1240 struct btrfs_key
*location
)
1242 struct btrfs_root
*root
;
1243 struct btrfs_fs_info
*fs_info
= tree_root
->fs_info
;
1244 struct btrfs_path
*path
;
1245 struct extent_buffer
*l
;
1250 root
= kzalloc(sizeof(*root
), GFP_NOFS
);
1252 return ERR_PTR(-ENOMEM
);
1253 if (location
->offset
== (u64
)-1) {
1254 ret
= find_and_setup_root(tree_root
, fs_info
,
1255 location
->objectid
, root
);
1258 return ERR_PTR(ret
);
1263 __setup_root(tree_root
->nodesize
, tree_root
->leafsize
,
1264 tree_root
->sectorsize
, tree_root
->stripesize
,
1265 root
, fs_info
, location
->objectid
);
1267 path
= btrfs_alloc_path();
1270 return ERR_PTR(-ENOMEM
);
1272 ret
= btrfs_search_slot(NULL
, tree_root
, location
, path
, 0, 0);
1275 read_extent_buffer(l
, &root
->root_item
,
1276 btrfs_item_ptr_offset(l
, path
->slots
[0]),
1277 sizeof(root
->root_item
));
1278 memcpy(&root
->root_key
, location
, sizeof(*location
));
1280 btrfs_free_path(path
);
1285 return ERR_PTR(ret
);
1288 generation
= btrfs_root_generation(&root
->root_item
);
1289 blocksize
= btrfs_level_size(root
, btrfs_root_level(&root
->root_item
));
1290 root
->node
= read_tree_block(root
, btrfs_root_bytenr(&root
->root_item
),
1291 blocksize
, generation
);
1292 root
->commit_root
= btrfs_root_node(root
);
1293 BUG_ON(!root
->node
);
1295 if (location
->objectid
!= BTRFS_TREE_LOG_OBJECTID
) {
1297 btrfs_check_and_init_root_item(&root
->root_item
);
1303 struct btrfs_root
*btrfs_read_fs_root_no_name(struct btrfs_fs_info
*fs_info
,
1304 struct btrfs_key
*location
)
1306 struct btrfs_root
*root
;
1309 if (location
->objectid
== BTRFS_ROOT_TREE_OBJECTID
)
1310 return fs_info
->tree_root
;
1311 if (location
->objectid
== BTRFS_EXTENT_TREE_OBJECTID
)
1312 return fs_info
->extent_root
;
1313 if (location
->objectid
== BTRFS_CHUNK_TREE_OBJECTID
)
1314 return fs_info
->chunk_root
;
1315 if (location
->objectid
== BTRFS_DEV_TREE_OBJECTID
)
1316 return fs_info
->dev_root
;
1317 if (location
->objectid
== BTRFS_CSUM_TREE_OBJECTID
)
1318 return fs_info
->csum_root
;
1320 spin_lock(&fs_info
->fs_roots_radix_lock
);
1321 root
= radix_tree_lookup(&fs_info
->fs_roots_radix
,
1322 (unsigned long)location
->objectid
);
1323 spin_unlock(&fs_info
->fs_roots_radix_lock
);
1327 root
= btrfs_read_fs_root_no_radix(fs_info
->tree_root
, location
);
1331 root
->free_ino_ctl
= kzalloc(sizeof(*root
->free_ino_ctl
), GFP_NOFS
);
1332 root
->free_ino_pinned
= kzalloc(sizeof(*root
->free_ino_pinned
),
1334 if (!root
->free_ino_pinned
|| !root
->free_ino_ctl
) {
1339 btrfs_init_free_ino_ctl(root
);
1340 mutex_init(&root
->fs_commit_mutex
);
1341 spin_lock_init(&root
->cache_lock
);
1342 init_waitqueue_head(&root
->cache_wait
);
1344 ret
= get_anon_bdev(&root
->anon_dev
);
1348 if (btrfs_root_refs(&root
->root_item
) == 0) {
1353 ret
= btrfs_find_orphan_item(fs_info
->tree_root
, location
->objectid
);
1357 root
->orphan_item_inserted
= 1;
1359 ret
= radix_tree_preload(GFP_NOFS
& ~__GFP_HIGHMEM
);
1363 spin_lock(&fs_info
->fs_roots_radix_lock
);
1364 ret
= radix_tree_insert(&fs_info
->fs_roots_radix
,
1365 (unsigned long)root
->root_key
.objectid
,
1370 spin_unlock(&fs_info
->fs_roots_radix_lock
);
1371 radix_tree_preload_end();
1373 if (ret
== -EEXIST
) {
1380 ret
= btrfs_find_dead_roots(fs_info
->tree_root
,
1381 root
->root_key
.objectid
);
1386 return ERR_PTR(ret
);
1389 static int btrfs_congested_fn(void *congested_data
, int bdi_bits
)
1391 struct btrfs_fs_info
*info
= (struct btrfs_fs_info
*)congested_data
;
1393 struct btrfs_device
*device
;
1394 struct backing_dev_info
*bdi
;
1397 list_for_each_entry_rcu(device
, &info
->fs_devices
->devices
, dev_list
) {
1400 bdi
= blk_get_backing_dev_info(device
->bdev
);
1401 if (bdi
&& bdi_congested(bdi
, bdi_bits
)) {
1411 * If this fails, caller must call bdi_destroy() to get rid of the
1414 static int setup_bdi(struct btrfs_fs_info
*info
, struct backing_dev_info
*bdi
)
1418 bdi
->capabilities
= BDI_CAP_MAP_COPY
;
1419 err
= bdi_setup_and_register(bdi
, "btrfs", BDI_CAP_MAP_COPY
);
1423 bdi
->ra_pages
= default_backing_dev_info
.ra_pages
;
1424 bdi
->congested_fn
= btrfs_congested_fn
;
1425 bdi
->congested_data
= info
;
1429 static int bio_ready_for_csum(struct bio
*bio
)
1435 struct extent_io_tree
*io_tree
= NULL
;
1436 struct bio_vec
*bvec
;
1440 bio_for_each_segment(bvec
, bio
, i
) {
1441 page
= bvec
->bv_page
;
1442 if (page
->private == EXTENT_PAGE_PRIVATE
) {
1443 length
+= bvec
->bv_len
;
1446 if (!page
->private) {
1447 length
+= bvec
->bv_len
;
1450 length
= bvec
->bv_len
;
1451 buf_len
= page
->private >> 2;
1452 start
= page_offset(page
) + bvec
->bv_offset
;
1453 io_tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
1455 /* are we fully contained in this bio? */
1456 if (buf_len
<= length
)
1459 ret
= extent_range_uptodate(io_tree
, start
+ length
,
1460 start
+ buf_len
- 1);
1465 * called by the kthread helper functions to finally call the bio end_io
1466 * functions. This is where read checksum verification actually happens
1468 static void end_workqueue_fn(struct btrfs_work
*work
)
1471 struct end_io_wq
*end_io_wq
;
1472 struct btrfs_fs_info
*fs_info
;
1475 end_io_wq
= container_of(work
, struct end_io_wq
, work
);
1476 bio
= end_io_wq
->bio
;
1477 fs_info
= end_io_wq
->info
;
1479 /* metadata bio reads are special because the whole tree block must
1480 * be checksummed at once. This makes sure the entire block is in
1481 * ram and up to date before trying to verify things. For
1482 * blocksize <= pagesize, it is basically a noop
1484 if (!(bio
->bi_rw
& REQ_WRITE
) && end_io_wq
->metadata
&&
1485 !bio_ready_for_csum(bio
)) {
1486 btrfs_queue_worker(&fs_info
->endio_meta_workers
,
1490 error
= end_io_wq
->error
;
1491 bio
->bi_private
= end_io_wq
->private;
1492 bio
->bi_end_io
= end_io_wq
->end_io
;
1494 bio_endio(bio
, error
);
1497 static int cleaner_kthread(void *arg
)
1499 struct btrfs_root
*root
= arg
;
1502 vfs_check_frozen(root
->fs_info
->sb
, SB_FREEZE_WRITE
);
1504 if (!(root
->fs_info
->sb
->s_flags
& MS_RDONLY
) &&
1505 mutex_trylock(&root
->fs_info
->cleaner_mutex
)) {
1506 btrfs_run_delayed_iputs(root
);
1507 btrfs_clean_old_snapshots(root
);
1508 mutex_unlock(&root
->fs_info
->cleaner_mutex
);
1509 btrfs_run_defrag_inodes(root
->fs_info
);
1512 if (freezing(current
)) {
1515 set_current_state(TASK_INTERRUPTIBLE
);
1516 if (!kthread_should_stop())
1518 __set_current_state(TASK_RUNNING
);
1520 } while (!kthread_should_stop());
1524 static int transaction_kthread(void *arg
)
1526 struct btrfs_root
*root
= arg
;
1527 struct btrfs_trans_handle
*trans
;
1528 struct btrfs_transaction
*cur
;
1531 unsigned long delay
;
1536 vfs_check_frozen(root
->fs_info
->sb
, SB_FREEZE_WRITE
);
1537 mutex_lock(&root
->fs_info
->transaction_kthread_mutex
);
1539 spin_lock(&root
->fs_info
->trans_lock
);
1540 cur
= root
->fs_info
->running_transaction
;
1542 spin_unlock(&root
->fs_info
->trans_lock
);
1546 now
= get_seconds();
1547 if (!cur
->blocked
&&
1548 (now
< cur
->start_time
|| now
- cur
->start_time
< 30)) {
1549 spin_unlock(&root
->fs_info
->trans_lock
);
1553 transid
= cur
->transid
;
1554 spin_unlock(&root
->fs_info
->trans_lock
);
1556 trans
= btrfs_join_transaction(root
);
1557 BUG_ON(IS_ERR(trans
));
1558 if (transid
== trans
->transid
) {
1559 ret
= btrfs_commit_transaction(trans
, root
);
1562 btrfs_end_transaction(trans
, root
);
1565 wake_up_process(root
->fs_info
->cleaner_kthread
);
1566 mutex_unlock(&root
->fs_info
->transaction_kthread_mutex
);
1568 if (freezing(current
)) {
1571 set_current_state(TASK_INTERRUPTIBLE
);
1572 if (!kthread_should_stop() &&
1573 !btrfs_transaction_blocked(root
->fs_info
))
1574 schedule_timeout(delay
);
1575 __set_current_state(TASK_RUNNING
);
1577 } while (!kthread_should_stop());
1582 * this will find the highest generation in the array of
1583 * root backups. The index of the highest array is returned,
1584 * or -1 if we can't find anything.
1586 * We check to make sure the array is valid by comparing the
1587 * generation of the latest root in the array with the generation
1588 * in the super block. If they don't match we pitch it.
1590 static int find_newest_super_backup(struct btrfs_fs_info
*info
, u64 newest_gen
)
1593 int newest_index
= -1;
1594 struct btrfs_root_backup
*root_backup
;
1597 for (i
= 0; i
< BTRFS_NUM_BACKUP_ROOTS
; i
++) {
1598 root_backup
= info
->super_copy
->super_roots
+ i
;
1599 cur
= btrfs_backup_tree_root_gen(root_backup
);
1600 if (cur
== newest_gen
)
1604 /* check to see if we actually wrapped around */
1605 if (newest_index
== BTRFS_NUM_BACKUP_ROOTS
- 1) {
1606 root_backup
= info
->super_copy
->super_roots
;
1607 cur
= btrfs_backup_tree_root_gen(root_backup
);
1608 if (cur
== newest_gen
)
1611 return newest_index
;
1616 * find the oldest backup so we know where to store new entries
1617 * in the backup array. This will set the backup_root_index
1618 * field in the fs_info struct
1620 static void find_oldest_super_backup(struct btrfs_fs_info
*info
,
1623 int newest_index
= -1;
1625 newest_index
= find_newest_super_backup(info
, newest_gen
);
1626 /* if there was garbage in there, just move along */
1627 if (newest_index
== -1) {
1628 info
->backup_root_index
= 0;
1630 info
->backup_root_index
= (newest_index
+ 1) % BTRFS_NUM_BACKUP_ROOTS
;
1635 * copy all the root pointers into the super backup array.
1636 * this will bump the backup pointer by one when it is
1639 static void backup_super_roots(struct btrfs_fs_info
*info
)
1642 struct btrfs_root_backup
*root_backup
;
1645 next_backup
= info
->backup_root_index
;
1646 last_backup
= (next_backup
+ BTRFS_NUM_BACKUP_ROOTS
- 1) %
1647 BTRFS_NUM_BACKUP_ROOTS
;
1650 * just overwrite the last backup if we're at the same generation
1651 * this happens only at umount
1653 root_backup
= info
->super_for_commit
->super_roots
+ last_backup
;
1654 if (btrfs_backup_tree_root_gen(root_backup
) ==
1655 btrfs_header_generation(info
->tree_root
->node
))
1656 next_backup
= last_backup
;
1658 root_backup
= info
->super_for_commit
->super_roots
+ next_backup
;
1661 * make sure all of our padding and empty slots get zero filled
1662 * regardless of which ones we use today
1664 memset(root_backup
, 0, sizeof(*root_backup
));
1666 info
->backup_root_index
= (next_backup
+ 1) % BTRFS_NUM_BACKUP_ROOTS
;
1668 btrfs_set_backup_tree_root(root_backup
, info
->tree_root
->node
->start
);
1669 btrfs_set_backup_tree_root_gen(root_backup
,
1670 btrfs_header_generation(info
->tree_root
->node
));
1672 btrfs_set_backup_tree_root_level(root_backup
,
1673 btrfs_header_level(info
->tree_root
->node
));
1675 btrfs_set_backup_chunk_root(root_backup
, info
->chunk_root
->node
->start
);
1676 btrfs_set_backup_chunk_root_gen(root_backup
,
1677 btrfs_header_generation(info
->chunk_root
->node
));
1678 btrfs_set_backup_chunk_root_level(root_backup
,
1679 btrfs_header_level(info
->chunk_root
->node
));
1681 btrfs_set_backup_extent_root(root_backup
, info
->extent_root
->node
->start
);
1682 btrfs_set_backup_extent_root_gen(root_backup
,
1683 btrfs_header_generation(info
->extent_root
->node
));
1684 btrfs_set_backup_extent_root_level(root_backup
,
1685 btrfs_header_level(info
->extent_root
->node
));
1687 btrfs_set_backup_fs_root(root_backup
, info
->fs_root
->node
->start
);
1688 btrfs_set_backup_fs_root_gen(root_backup
,
1689 btrfs_header_generation(info
->fs_root
->node
));
1690 btrfs_set_backup_fs_root_level(root_backup
,
1691 btrfs_header_level(info
->fs_root
->node
));
1693 btrfs_set_backup_dev_root(root_backup
, info
->dev_root
->node
->start
);
1694 btrfs_set_backup_dev_root_gen(root_backup
,
1695 btrfs_header_generation(info
->dev_root
->node
));
1696 btrfs_set_backup_dev_root_level(root_backup
,
1697 btrfs_header_level(info
->dev_root
->node
));
1699 btrfs_set_backup_csum_root(root_backup
, info
->csum_root
->node
->start
);
1700 btrfs_set_backup_csum_root_gen(root_backup
,
1701 btrfs_header_generation(info
->csum_root
->node
));
1702 btrfs_set_backup_csum_root_level(root_backup
,
1703 btrfs_header_level(info
->csum_root
->node
));
1705 btrfs_set_backup_total_bytes(root_backup
,
1706 btrfs_super_total_bytes(info
->super_copy
));
1707 btrfs_set_backup_bytes_used(root_backup
,
1708 btrfs_super_bytes_used(info
->super_copy
));
1709 btrfs_set_backup_num_devices(root_backup
,
1710 btrfs_super_num_devices(info
->super_copy
));
1713 * if we don't copy this out to the super_copy, it won't get remembered
1714 * for the next commit
1716 memcpy(&info
->super_copy
->super_roots
,
1717 &info
->super_for_commit
->super_roots
,
1718 sizeof(*root_backup
) * BTRFS_NUM_BACKUP_ROOTS
);
1722 * this copies info out of the root backup array and back into
1723 * the in-memory super block. It is meant to help iterate through
1724 * the array, so you send it the number of backups you've already
1725 * tried and the last backup index you used.
1727 * this returns -1 when it has tried all the backups
1729 static noinline
int next_root_backup(struct btrfs_fs_info
*info
,
1730 struct btrfs_super_block
*super
,
1731 int *num_backups_tried
, int *backup_index
)
1733 struct btrfs_root_backup
*root_backup
;
1734 int newest
= *backup_index
;
1736 if (*num_backups_tried
== 0) {
1737 u64 gen
= btrfs_super_generation(super
);
1739 newest
= find_newest_super_backup(info
, gen
);
1743 *backup_index
= newest
;
1744 *num_backups_tried
= 1;
1745 } else if (*num_backups_tried
== BTRFS_NUM_BACKUP_ROOTS
) {
1746 /* we've tried all the backups, all done */
1749 /* jump to the next oldest backup */
1750 newest
= (*backup_index
+ BTRFS_NUM_BACKUP_ROOTS
- 1) %
1751 BTRFS_NUM_BACKUP_ROOTS
;
1752 *backup_index
= newest
;
1753 *num_backups_tried
+= 1;
1755 root_backup
= super
->super_roots
+ newest
;
1757 btrfs_set_super_generation(super
,
1758 btrfs_backup_tree_root_gen(root_backup
));
1759 btrfs_set_super_root(super
, btrfs_backup_tree_root(root_backup
));
1760 btrfs_set_super_root_level(super
,
1761 btrfs_backup_tree_root_level(root_backup
));
1762 btrfs_set_super_bytes_used(super
, btrfs_backup_bytes_used(root_backup
));
1765 * fixme: the total bytes and num_devices need to match or we should
1768 btrfs_set_super_total_bytes(super
, btrfs_backup_total_bytes(root_backup
));
1769 btrfs_set_super_num_devices(super
, btrfs_backup_num_devices(root_backup
));
1773 /* helper to cleanup tree roots */
1774 static void free_root_pointers(struct btrfs_fs_info
*info
, int chunk_root
)
1776 free_extent_buffer(info
->tree_root
->node
);
1777 free_extent_buffer(info
->tree_root
->commit_root
);
1778 free_extent_buffer(info
->dev_root
->node
);
1779 free_extent_buffer(info
->dev_root
->commit_root
);
1780 free_extent_buffer(info
->extent_root
->node
);
1781 free_extent_buffer(info
->extent_root
->commit_root
);
1782 free_extent_buffer(info
->csum_root
->node
);
1783 free_extent_buffer(info
->csum_root
->commit_root
);
1785 info
->tree_root
->node
= NULL
;
1786 info
->tree_root
->commit_root
= NULL
;
1787 info
->dev_root
->node
= NULL
;
1788 info
->dev_root
->commit_root
= NULL
;
1789 info
->extent_root
->node
= NULL
;
1790 info
->extent_root
->commit_root
= NULL
;
1791 info
->csum_root
->node
= NULL
;
1792 info
->csum_root
->commit_root
= NULL
;
1795 free_extent_buffer(info
->chunk_root
->node
);
1796 free_extent_buffer(info
->chunk_root
->commit_root
);
1797 info
->chunk_root
->node
= NULL
;
1798 info
->chunk_root
->commit_root
= NULL
;
1803 struct btrfs_root
*open_ctree(struct super_block
*sb
,
1804 struct btrfs_fs_devices
*fs_devices
,
1814 struct btrfs_key location
;
1815 struct buffer_head
*bh
;
1816 struct btrfs_root
*extent_root
= kzalloc(sizeof(struct btrfs_root
),
1818 struct btrfs_root
*csum_root
= kzalloc(sizeof(struct btrfs_root
),
1820 struct btrfs_root
*tree_root
= btrfs_sb(sb
);
1821 struct btrfs_fs_info
*fs_info
= NULL
;
1822 struct btrfs_root
*chunk_root
= kzalloc(sizeof(struct btrfs_root
),
1824 struct btrfs_root
*dev_root
= kzalloc(sizeof(struct btrfs_root
),
1826 struct btrfs_root
*log_tree_root
;
1830 int num_backups_tried
= 0;
1831 int backup_index
= 0;
1833 struct btrfs_super_block
*disk_super
;
1835 if (!extent_root
|| !tree_root
|| !tree_root
->fs_info
||
1836 !chunk_root
|| !dev_root
|| !csum_root
) {
1840 fs_info
= tree_root
->fs_info
;
1842 ret
= init_srcu_struct(&fs_info
->subvol_srcu
);
1848 ret
= setup_bdi(fs_info
, &fs_info
->bdi
);
1854 fs_info
->btree_inode
= new_inode(sb
);
1855 if (!fs_info
->btree_inode
) {
1860 mapping_set_gfp_mask(fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
1862 INIT_RADIX_TREE(&fs_info
->fs_roots_radix
, GFP_ATOMIC
);
1863 INIT_LIST_HEAD(&fs_info
->trans_list
);
1864 INIT_LIST_HEAD(&fs_info
->dead_roots
);
1865 INIT_LIST_HEAD(&fs_info
->delayed_iputs
);
1866 INIT_LIST_HEAD(&fs_info
->hashers
);
1867 INIT_LIST_HEAD(&fs_info
->delalloc_inodes
);
1868 INIT_LIST_HEAD(&fs_info
->ordered_operations
);
1869 INIT_LIST_HEAD(&fs_info
->caching_block_groups
);
1870 spin_lock_init(&fs_info
->delalloc_lock
);
1871 spin_lock_init(&fs_info
->trans_lock
);
1872 spin_lock_init(&fs_info
->ref_cache_lock
);
1873 spin_lock_init(&fs_info
->fs_roots_radix_lock
);
1874 spin_lock_init(&fs_info
->delayed_iput_lock
);
1875 spin_lock_init(&fs_info
->defrag_inodes_lock
);
1876 spin_lock_init(&fs_info
->free_chunk_lock
);
1877 mutex_init(&fs_info
->reloc_mutex
);
1879 init_completion(&fs_info
->kobj_unregister
);
1880 fs_info
->tree_root
= tree_root
;
1881 fs_info
->extent_root
= extent_root
;
1882 fs_info
->csum_root
= csum_root
;
1883 fs_info
->chunk_root
= chunk_root
;
1884 fs_info
->dev_root
= dev_root
;
1885 fs_info
->fs_devices
= fs_devices
;
1886 INIT_LIST_HEAD(&fs_info
->dirty_cowonly_roots
);
1887 INIT_LIST_HEAD(&fs_info
->space_info
);
1888 btrfs_mapping_init(&fs_info
->mapping_tree
);
1889 btrfs_init_block_rsv(&fs_info
->global_block_rsv
);
1890 btrfs_init_block_rsv(&fs_info
->delalloc_block_rsv
);
1891 btrfs_init_block_rsv(&fs_info
->trans_block_rsv
);
1892 btrfs_init_block_rsv(&fs_info
->chunk_block_rsv
);
1893 btrfs_init_block_rsv(&fs_info
->empty_block_rsv
);
1894 atomic_set(&fs_info
->nr_async_submits
, 0);
1895 atomic_set(&fs_info
->async_delalloc_pages
, 0);
1896 atomic_set(&fs_info
->async_submit_draining
, 0);
1897 atomic_set(&fs_info
->nr_async_bios
, 0);
1898 atomic_set(&fs_info
->defrag_running
, 0);
1900 fs_info
->max_inline
= 8192 * 1024;
1901 fs_info
->metadata_ratio
= 0;
1902 fs_info
->defrag_inodes
= RB_ROOT
;
1903 fs_info
->trans_no_join
= 0;
1904 fs_info
->free_chunk_space
= 0;
1906 fs_info
->thread_pool_size
= min_t(unsigned long,
1907 num_online_cpus() + 2, 8);
1909 INIT_LIST_HEAD(&fs_info
->ordered_extents
);
1910 spin_lock_init(&fs_info
->ordered_extent_lock
);
1911 fs_info
->delayed_root
= kmalloc(sizeof(struct btrfs_delayed_root
),
1913 if (!fs_info
->delayed_root
) {
1917 btrfs_init_delayed_root(fs_info
->delayed_root
);
1919 mutex_init(&fs_info
->scrub_lock
);
1920 atomic_set(&fs_info
->scrubs_running
, 0);
1921 atomic_set(&fs_info
->scrub_pause_req
, 0);
1922 atomic_set(&fs_info
->scrubs_paused
, 0);
1923 atomic_set(&fs_info
->scrub_cancel_req
, 0);
1924 init_waitqueue_head(&fs_info
->scrub_pause_wait
);
1925 init_rwsem(&fs_info
->scrub_super_lock
);
1926 fs_info
->scrub_workers_refcnt
= 0;
1928 sb
->s_blocksize
= 4096;
1929 sb
->s_blocksize_bits
= blksize_bits(4096);
1930 sb
->s_bdi
= &fs_info
->bdi
;
1932 fs_info
->btree_inode
->i_ino
= BTRFS_BTREE_INODE_OBJECTID
;
1933 fs_info
->btree_inode
->i_nlink
= 1;
1935 * we set the i_size on the btree inode to the max possible int.
1936 * the real end of the address space is determined by all of
1937 * the devices in the system
1939 fs_info
->btree_inode
->i_size
= OFFSET_MAX
;
1940 fs_info
->btree_inode
->i_mapping
->a_ops
= &btree_aops
;
1941 fs_info
->btree_inode
->i_mapping
->backing_dev_info
= &fs_info
->bdi
;
1943 RB_CLEAR_NODE(&BTRFS_I(fs_info
->btree_inode
)->rb_node
);
1944 extent_io_tree_init(&BTRFS_I(fs_info
->btree_inode
)->io_tree
,
1945 fs_info
->btree_inode
->i_mapping
);
1946 extent_map_tree_init(&BTRFS_I(fs_info
->btree_inode
)->extent_tree
);
1948 BTRFS_I(fs_info
->btree_inode
)->io_tree
.ops
= &btree_extent_io_ops
;
1950 BTRFS_I(fs_info
->btree_inode
)->root
= tree_root
;
1951 memset(&BTRFS_I(fs_info
->btree_inode
)->location
, 0,
1952 sizeof(struct btrfs_key
));
1953 BTRFS_I(fs_info
->btree_inode
)->dummy_inode
= 1;
1954 insert_inode_hash(fs_info
->btree_inode
);
1956 spin_lock_init(&fs_info
->block_group_cache_lock
);
1957 fs_info
->block_group_cache_tree
= RB_ROOT
;
1959 extent_io_tree_init(&fs_info
->freed_extents
[0],
1960 fs_info
->btree_inode
->i_mapping
);
1961 extent_io_tree_init(&fs_info
->freed_extents
[1],
1962 fs_info
->btree_inode
->i_mapping
);
1963 fs_info
->pinned_extents
= &fs_info
->freed_extents
[0];
1964 fs_info
->do_barriers
= 1;
1967 mutex_init(&fs_info
->ordered_operations_mutex
);
1968 mutex_init(&fs_info
->tree_log_mutex
);
1969 mutex_init(&fs_info
->chunk_mutex
);
1970 mutex_init(&fs_info
->transaction_kthread_mutex
);
1971 mutex_init(&fs_info
->cleaner_mutex
);
1972 mutex_init(&fs_info
->volume_mutex
);
1973 init_rwsem(&fs_info
->extent_commit_sem
);
1974 init_rwsem(&fs_info
->cleanup_work_sem
);
1975 init_rwsem(&fs_info
->subvol_sem
);
1977 btrfs_init_free_cluster(&fs_info
->meta_alloc_cluster
);
1978 btrfs_init_free_cluster(&fs_info
->data_alloc_cluster
);
1980 init_waitqueue_head(&fs_info
->transaction_throttle
);
1981 init_waitqueue_head(&fs_info
->transaction_wait
);
1982 init_waitqueue_head(&fs_info
->transaction_blocked_wait
);
1983 init_waitqueue_head(&fs_info
->async_submit_wait
);
1985 __setup_root(4096, 4096, 4096, 4096, tree_root
,
1986 fs_info
, BTRFS_ROOT_TREE_OBJECTID
);
1988 bh
= btrfs_read_dev_super(fs_devices
->latest_bdev
);
1994 memcpy(fs_info
->super_copy
, bh
->b_data
, sizeof(*fs_info
->super_copy
));
1995 memcpy(fs_info
->super_for_commit
, fs_info
->super_copy
,
1996 sizeof(*fs_info
->super_for_commit
));
1999 memcpy(fs_info
->fsid
, fs_info
->super_copy
->fsid
, BTRFS_FSID_SIZE
);
2001 disk_super
= fs_info
->super_copy
;
2002 if (!btrfs_super_root(disk_super
))
2005 /* check FS state, whether FS is broken. */
2006 fs_info
->fs_state
|= btrfs_super_flags(disk_super
);
2008 btrfs_check_super_valid(fs_info
, sb
->s_flags
& MS_RDONLY
);
2011 * run through our array of backup supers and setup
2012 * our ring pointer to the oldest one
2014 generation
= btrfs_super_generation(disk_super
);
2015 find_oldest_super_backup(fs_info
, generation
);
2018 * In the long term, we'll store the compression type in the super
2019 * block, and it'll be used for per file compression control.
2021 fs_info
->compress_type
= BTRFS_COMPRESS_ZLIB
;
2023 ret
= btrfs_parse_options(tree_root
, options
);
2029 features
= btrfs_super_incompat_flags(disk_super
) &
2030 ~BTRFS_FEATURE_INCOMPAT_SUPP
;
2032 printk(KERN_ERR
"BTRFS: couldn't mount because of "
2033 "unsupported optional features (%Lx).\n",
2034 (unsigned long long)features
);
2039 features
= btrfs_super_incompat_flags(disk_super
);
2040 features
|= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF
;
2041 if (tree_root
->fs_info
->compress_type
& BTRFS_COMPRESS_LZO
)
2042 features
|= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO
;
2043 btrfs_set_super_incompat_flags(disk_super
, features
);
2045 features
= btrfs_super_compat_ro_flags(disk_super
) &
2046 ~BTRFS_FEATURE_COMPAT_RO_SUPP
;
2047 if (!(sb
->s_flags
& MS_RDONLY
) && features
) {
2048 printk(KERN_ERR
"BTRFS: couldn't mount RDWR because of "
2049 "unsupported option features (%Lx).\n",
2050 (unsigned long long)features
);
2055 btrfs_init_workers(&fs_info
->generic_worker
,
2056 "genwork", 1, NULL
);
2058 btrfs_init_workers(&fs_info
->workers
, "worker",
2059 fs_info
->thread_pool_size
,
2060 &fs_info
->generic_worker
);
2062 btrfs_init_workers(&fs_info
->delalloc_workers
, "delalloc",
2063 fs_info
->thread_pool_size
,
2064 &fs_info
->generic_worker
);
2066 btrfs_init_workers(&fs_info
->submit_workers
, "submit",
2067 min_t(u64
, fs_devices
->num_devices
,
2068 fs_info
->thread_pool_size
),
2069 &fs_info
->generic_worker
);
2071 btrfs_init_workers(&fs_info
->caching_workers
, "cache",
2072 2, &fs_info
->generic_worker
);
2074 /* a higher idle thresh on the submit workers makes it much more
2075 * likely that bios will be send down in a sane order to the
2078 fs_info
->submit_workers
.idle_thresh
= 64;
2080 fs_info
->workers
.idle_thresh
= 16;
2081 fs_info
->workers
.ordered
= 1;
2083 fs_info
->delalloc_workers
.idle_thresh
= 2;
2084 fs_info
->delalloc_workers
.ordered
= 1;
2086 btrfs_init_workers(&fs_info
->fixup_workers
, "fixup", 1,
2087 &fs_info
->generic_worker
);
2088 btrfs_init_workers(&fs_info
->endio_workers
, "endio",
2089 fs_info
->thread_pool_size
,
2090 &fs_info
->generic_worker
);
2091 btrfs_init_workers(&fs_info
->endio_meta_workers
, "endio-meta",
2092 fs_info
->thread_pool_size
,
2093 &fs_info
->generic_worker
);
2094 btrfs_init_workers(&fs_info
->endio_meta_write_workers
,
2095 "endio-meta-write", fs_info
->thread_pool_size
,
2096 &fs_info
->generic_worker
);
2097 btrfs_init_workers(&fs_info
->endio_write_workers
, "endio-write",
2098 fs_info
->thread_pool_size
,
2099 &fs_info
->generic_worker
);
2100 btrfs_init_workers(&fs_info
->endio_freespace_worker
, "freespace-write",
2101 1, &fs_info
->generic_worker
);
2102 btrfs_init_workers(&fs_info
->delayed_workers
, "delayed-meta",
2103 fs_info
->thread_pool_size
,
2104 &fs_info
->generic_worker
);
2107 * endios are largely parallel and should have a very
2110 fs_info
->endio_workers
.idle_thresh
= 4;
2111 fs_info
->endio_meta_workers
.idle_thresh
= 4;
2113 fs_info
->endio_write_workers
.idle_thresh
= 2;
2114 fs_info
->endio_meta_write_workers
.idle_thresh
= 2;
2116 btrfs_start_workers(&fs_info
->workers
, 1);
2117 btrfs_start_workers(&fs_info
->generic_worker
, 1);
2118 btrfs_start_workers(&fs_info
->submit_workers
, 1);
2119 btrfs_start_workers(&fs_info
->delalloc_workers
, 1);
2120 btrfs_start_workers(&fs_info
->fixup_workers
, 1);
2121 btrfs_start_workers(&fs_info
->endio_workers
, 1);
2122 btrfs_start_workers(&fs_info
->endio_meta_workers
, 1);
2123 btrfs_start_workers(&fs_info
->endio_meta_write_workers
, 1);
2124 btrfs_start_workers(&fs_info
->endio_write_workers
, 1);
2125 btrfs_start_workers(&fs_info
->endio_freespace_worker
, 1);
2126 btrfs_start_workers(&fs_info
->delayed_workers
, 1);
2127 btrfs_start_workers(&fs_info
->caching_workers
, 1);
2129 fs_info
->bdi
.ra_pages
*= btrfs_super_num_devices(disk_super
);
2130 fs_info
->bdi
.ra_pages
= max(fs_info
->bdi
.ra_pages
,
2131 4 * 1024 * 1024 / PAGE_CACHE_SIZE
);
2133 nodesize
= btrfs_super_nodesize(disk_super
);
2134 leafsize
= btrfs_super_leafsize(disk_super
);
2135 sectorsize
= btrfs_super_sectorsize(disk_super
);
2136 stripesize
= btrfs_super_stripesize(disk_super
);
2137 tree_root
->nodesize
= nodesize
;
2138 tree_root
->leafsize
= leafsize
;
2139 tree_root
->sectorsize
= sectorsize
;
2140 tree_root
->stripesize
= stripesize
;
2142 sb
->s_blocksize
= sectorsize
;
2143 sb
->s_blocksize_bits
= blksize_bits(sectorsize
);
2145 if (strncmp((char *)(&disk_super
->magic
), BTRFS_MAGIC
,
2146 sizeof(disk_super
->magic
))) {
2147 printk(KERN_INFO
"btrfs: valid FS not found on %s\n", sb
->s_id
);
2148 goto fail_sb_buffer
;
2151 mutex_lock(&fs_info
->chunk_mutex
);
2152 ret
= btrfs_read_sys_array(tree_root
);
2153 mutex_unlock(&fs_info
->chunk_mutex
);
2155 printk(KERN_WARNING
"btrfs: failed to read the system "
2156 "array on %s\n", sb
->s_id
);
2157 goto fail_sb_buffer
;
2160 blocksize
= btrfs_level_size(tree_root
,
2161 btrfs_super_chunk_root_level(disk_super
));
2162 generation
= btrfs_super_chunk_root_generation(disk_super
);
2164 __setup_root(nodesize
, leafsize
, sectorsize
, stripesize
,
2165 chunk_root
, fs_info
, BTRFS_CHUNK_TREE_OBJECTID
);
2167 chunk_root
->node
= read_tree_block(chunk_root
,
2168 btrfs_super_chunk_root(disk_super
),
2169 blocksize
, generation
);
2170 BUG_ON(!chunk_root
->node
);
2171 if (!test_bit(EXTENT_BUFFER_UPTODATE
, &chunk_root
->node
->bflags
)) {
2172 printk(KERN_WARNING
"btrfs: failed to read chunk root on %s\n",
2174 goto fail_tree_roots
;
2176 btrfs_set_root_node(&chunk_root
->root_item
, chunk_root
->node
);
2177 chunk_root
->commit_root
= btrfs_root_node(chunk_root
);
2179 read_extent_buffer(chunk_root
->node
, fs_info
->chunk_tree_uuid
,
2180 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root
->node
),
2183 mutex_lock(&fs_info
->chunk_mutex
);
2184 ret
= btrfs_read_chunk_tree(chunk_root
);
2185 mutex_unlock(&fs_info
->chunk_mutex
);
2187 printk(KERN_WARNING
"btrfs: failed to read chunk tree on %s\n",
2189 goto fail_tree_roots
;
2192 btrfs_close_extra_devices(fs_devices
);
2195 blocksize
= btrfs_level_size(tree_root
,
2196 btrfs_super_root_level(disk_super
));
2197 generation
= btrfs_super_generation(disk_super
);
2199 tree_root
->node
= read_tree_block(tree_root
,
2200 btrfs_super_root(disk_super
),
2201 blocksize
, generation
);
2202 if (!tree_root
->node
||
2203 !test_bit(EXTENT_BUFFER_UPTODATE
, &tree_root
->node
->bflags
)) {
2204 printk(KERN_WARNING
"btrfs: failed to read tree root on %s\n",
2207 goto recovery_tree_root
;
2210 btrfs_set_root_node(&tree_root
->root_item
, tree_root
->node
);
2211 tree_root
->commit_root
= btrfs_root_node(tree_root
);
2213 ret
= find_and_setup_root(tree_root
, fs_info
,
2214 BTRFS_EXTENT_TREE_OBJECTID
, extent_root
);
2216 goto recovery_tree_root
;
2217 extent_root
->track_dirty
= 1;
2219 ret
= find_and_setup_root(tree_root
, fs_info
,
2220 BTRFS_DEV_TREE_OBJECTID
, dev_root
);
2222 goto recovery_tree_root
;
2223 dev_root
->track_dirty
= 1;
2225 ret
= find_and_setup_root(tree_root
, fs_info
,
2226 BTRFS_CSUM_TREE_OBJECTID
, csum_root
);
2228 goto recovery_tree_root
;
2230 csum_root
->track_dirty
= 1;
2232 fs_info
->generation
= generation
;
2233 fs_info
->last_trans_committed
= generation
;
2234 fs_info
->data_alloc_profile
= (u64
)-1;
2235 fs_info
->metadata_alloc_profile
= (u64
)-1;
2236 fs_info
->system_alloc_profile
= fs_info
->metadata_alloc_profile
;
2238 ret
= btrfs_init_space_info(fs_info
);
2240 printk(KERN_ERR
"Failed to initial space info: %d\n", ret
);
2241 goto fail_block_groups
;
2244 ret
= btrfs_read_block_groups(extent_root
);
2246 printk(KERN_ERR
"Failed to read block groups: %d\n", ret
);
2247 goto fail_block_groups
;
2250 fs_info
->cleaner_kthread
= kthread_run(cleaner_kthread
, tree_root
,
2252 if (IS_ERR(fs_info
->cleaner_kthread
))
2253 goto fail_block_groups
;
2255 fs_info
->transaction_kthread
= kthread_run(transaction_kthread
,
2257 "btrfs-transaction");
2258 if (IS_ERR(fs_info
->transaction_kthread
))
2261 if (!btrfs_test_opt(tree_root
, SSD
) &&
2262 !btrfs_test_opt(tree_root
, NOSSD
) &&
2263 !fs_info
->fs_devices
->rotating
) {
2264 printk(KERN_INFO
"Btrfs detected SSD devices, enabling SSD "
2266 btrfs_set_opt(fs_info
->mount_opt
, SSD
);
2269 /* do not make disk changes in broken FS */
2270 if (btrfs_super_log_root(disk_super
) != 0 &&
2271 !(fs_info
->fs_state
& BTRFS_SUPER_FLAG_ERROR
)) {
2272 u64 bytenr
= btrfs_super_log_root(disk_super
);
2274 if (fs_devices
->rw_devices
== 0) {
2275 printk(KERN_WARNING
"Btrfs log replay required "
2278 goto fail_trans_kthread
;
2281 btrfs_level_size(tree_root
,
2282 btrfs_super_log_root_level(disk_super
));
2284 log_tree_root
= kzalloc(sizeof(struct btrfs_root
), GFP_NOFS
);
2285 if (!log_tree_root
) {
2287 goto fail_trans_kthread
;
2290 __setup_root(nodesize
, leafsize
, sectorsize
, stripesize
,
2291 log_tree_root
, fs_info
, BTRFS_TREE_LOG_OBJECTID
);
2293 log_tree_root
->node
= read_tree_block(tree_root
, bytenr
,
2296 ret
= btrfs_recover_log_trees(log_tree_root
);
2299 if (sb
->s_flags
& MS_RDONLY
) {
2300 ret
= btrfs_commit_super(tree_root
);
2305 ret
= btrfs_find_orphan_roots(tree_root
);
2308 if (!(sb
->s_flags
& MS_RDONLY
)) {
2309 ret
= btrfs_cleanup_fs_roots(fs_info
);
2312 ret
= btrfs_recover_relocation(tree_root
);
2315 "btrfs: failed to recover relocation\n");
2317 goto fail_trans_kthread
;
2321 location
.objectid
= BTRFS_FS_TREE_OBJECTID
;
2322 location
.type
= BTRFS_ROOT_ITEM_KEY
;
2323 location
.offset
= (u64
)-1;
2325 fs_info
->fs_root
= btrfs_read_fs_root_no_name(fs_info
, &location
);
2326 if (!fs_info
->fs_root
)
2327 goto fail_trans_kthread
;
2328 if (IS_ERR(fs_info
->fs_root
)) {
2329 err
= PTR_ERR(fs_info
->fs_root
);
2330 goto fail_trans_kthread
;
2333 if (!(sb
->s_flags
& MS_RDONLY
)) {
2334 down_read(&fs_info
->cleanup_work_sem
);
2335 err
= btrfs_orphan_cleanup(fs_info
->fs_root
);
2337 err
= btrfs_orphan_cleanup(fs_info
->tree_root
);
2338 up_read(&fs_info
->cleanup_work_sem
);
2340 close_ctree(tree_root
);
2341 return ERR_PTR(err
);
2348 kthread_stop(fs_info
->transaction_kthread
);
2350 kthread_stop(fs_info
->cleaner_kthread
);
2353 * make sure we're done with the btree inode before we stop our
2356 filemap_write_and_wait(fs_info
->btree_inode
->i_mapping
);
2357 invalidate_inode_pages2(fs_info
->btree_inode
->i_mapping
);
2360 btrfs_free_block_groups(fs_info
);
2363 free_root_pointers(fs_info
, 1);
2366 btrfs_stop_workers(&fs_info
->generic_worker
);
2367 btrfs_stop_workers(&fs_info
->fixup_workers
);
2368 btrfs_stop_workers(&fs_info
->delalloc_workers
);
2369 btrfs_stop_workers(&fs_info
->workers
);
2370 btrfs_stop_workers(&fs_info
->endio_workers
);
2371 btrfs_stop_workers(&fs_info
->endio_meta_workers
);
2372 btrfs_stop_workers(&fs_info
->endio_meta_write_workers
);
2373 btrfs_stop_workers(&fs_info
->endio_write_workers
);
2374 btrfs_stop_workers(&fs_info
->endio_freespace_worker
);
2375 btrfs_stop_workers(&fs_info
->submit_workers
);
2376 btrfs_stop_workers(&fs_info
->delayed_workers
);
2377 btrfs_stop_workers(&fs_info
->caching_workers
);
2380 invalidate_inode_pages2(fs_info
->btree_inode
->i_mapping
);
2381 iput(fs_info
->btree_inode
);
2383 btrfs_close_devices(fs_info
->fs_devices
);
2384 btrfs_mapping_tree_free(&fs_info
->mapping_tree
);
2386 bdi_destroy(&fs_info
->bdi
);
2388 cleanup_srcu_struct(&fs_info
->subvol_srcu
);
2390 free_fs_info(fs_info
);
2391 return ERR_PTR(err
);
2395 if (!btrfs_test_opt(tree_root
, RECOVERY
))
2396 goto fail_tree_roots
;
2398 free_root_pointers(fs_info
, 0);
2400 /* don't use the log in recovery mode, it won't be valid */
2401 btrfs_set_super_log_root(disk_super
, 0);
2403 /* we can't trust the free space cache either */
2404 btrfs_set_opt(fs_info
->mount_opt
, CLEAR_CACHE
);
2406 ret
= next_root_backup(fs_info
, fs_info
->super_copy
,
2407 &num_backups_tried
, &backup_index
);
2409 goto fail_block_groups
;
2410 goto retry_root_backup
;
2413 static void btrfs_end_buffer_write_sync(struct buffer_head
*bh
, int uptodate
)
2415 char b
[BDEVNAME_SIZE
];
2418 set_buffer_uptodate(bh
);
2420 printk_ratelimited(KERN_WARNING
"lost page write due to "
2421 "I/O error on %s\n",
2422 bdevname(bh
->b_bdev
, b
));
2423 /* note, we dont' set_buffer_write_io_error because we have
2424 * our own ways of dealing with the IO errors
2426 clear_buffer_uptodate(bh
);
2432 struct buffer_head
*btrfs_read_dev_super(struct block_device
*bdev
)
2434 struct buffer_head
*bh
;
2435 struct buffer_head
*latest
= NULL
;
2436 struct btrfs_super_block
*super
;
2441 /* we would like to check all the supers, but that would make
2442 * a btrfs mount succeed after a mkfs from a different FS.
2443 * So, we need to add a special mount option to scan for
2444 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2446 for (i
= 0; i
< 1; i
++) {
2447 bytenr
= btrfs_sb_offset(i
);
2448 if (bytenr
+ 4096 >= i_size_read(bdev
->bd_inode
))
2450 bh
= __bread(bdev
, bytenr
/ 4096, 4096);
2454 super
= (struct btrfs_super_block
*)bh
->b_data
;
2455 if (btrfs_super_bytenr(super
) != bytenr
||
2456 strncmp((char *)(&super
->magic
), BTRFS_MAGIC
,
2457 sizeof(super
->magic
))) {
2462 if (!latest
|| btrfs_super_generation(super
) > transid
) {
2465 transid
= btrfs_super_generation(super
);
2474 * this should be called twice, once with wait == 0 and
2475 * once with wait == 1. When wait == 0 is done, all the buffer heads
2476 * we write are pinned.
2478 * They are released when wait == 1 is done.
2479 * max_mirrors must be the same for both runs, and it indicates how
2480 * many supers on this one device should be written.
2482 * max_mirrors == 0 means to write them all.
2484 static int write_dev_supers(struct btrfs_device
*device
,
2485 struct btrfs_super_block
*sb
,
2486 int do_barriers
, int wait
, int max_mirrors
)
2488 struct buffer_head
*bh
;
2494 int last_barrier
= 0;
2496 if (max_mirrors
== 0)
2497 max_mirrors
= BTRFS_SUPER_MIRROR_MAX
;
2499 /* make sure only the last submit_bh does a barrier */
2501 for (i
= 0; i
< max_mirrors
; i
++) {
2502 bytenr
= btrfs_sb_offset(i
);
2503 if (bytenr
+ BTRFS_SUPER_INFO_SIZE
>=
2504 device
->total_bytes
)
2510 for (i
= 0; i
< max_mirrors
; i
++) {
2511 bytenr
= btrfs_sb_offset(i
);
2512 if (bytenr
+ BTRFS_SUPER_INFO_SIZE
>= device
->total_bytes
)
2516 bh
= __find_get_block(device
->bdev
, bytenr
/ 4096,
2517 BTRFS_SUPER_INFO_SIZE
);
2520 if (!buffer_uptodate(bh
))
2523 /* drop our reference */
2526 /* drop the reference from the wait == 0 run */
2530 btrfs_set_super_bytenr(sb
, bytenr
);
2533 crc
= btrfs_csum_data(NULL
, (char *)sb
+
2534 BTRFS_CSUM_SIZE
, crc
,
2535 BTRFS_SUPER_INFO_SIZE
-
2537 btrfs_csum_final(crc
, sb
->csum
);
2540 * one reference for us, and we leave it for the
2543 bh
= __getblk(device
->bdev
, bytenr
/ 4096,
2544 BTRFS_SUPER_INFO_SIZE
);
2545 memcpy(bh
->b_data
, sb
, BTRFS_SUPER_INFO_SIZE
);
2547 /* one reference for submit_bh */
2550 set_buffer_uptodate(bh
);
2552 bh
->b_end_io
= btrfs_end_buffer_write_sync
;
2555 if (i
== last_barrier
&& do_barriers
)
2556 ret
= submit_bh(WRITE_FLUSH_FUA
, bh
);
2558 ret
= submit_bh(WRITE_SYNC
, bh
);
2563 return errors
< i
? 0 : -1;
2566 int write_all_supers(struct btrfs_root
*root
, int max_mirrors
)
2568 struct list_head
*head
;
2569 struct btrfs_device
*dev
;
2570 struct btrfs_super_block
*sb
;
2571 struct btrfs_dev_item
*dev_item
;
2575 int total_errors
= 0;
2578 max_errors
= btrfs_super_num_devices(root
->fs_info
->super_copy
) - 1;
2579 do_barriers
= !btrfs_test_opt(root
, NOBARRIER
);
2580 backup_super_roots(root
->fs_info
);
2582 sb
= root
->fs_info
->super_for_commit
;
2583 dev_item
= &sb
->dev_item
;
2585 mutex_lock(&root
->fs_info
->fs_devices
->device_list_mutex
);
2586 head
= &root
->fs_info
->fs_devices
->devices
;
2587 list_for_each_entry_rcu(dev
, head
, dev_list
) {
2592 if (!dev
->in_fs_metadata
|| !dev
->writeable
)
2595 btrfs_set_stack_device_generation(dev_item
, 0);
2596 btrfs_set_stack_device_type(dev_item
, dev
->type
);
2597 btrfs_set_stack_device_id(dev_item
, dev
->devid
);
2598 btrfs_set_stack_device_total_bytes(dev_item
, dev
->total_bytes
);
2599 btrfs_set_stack_device_bytes_used(dev_item
, dev
->bytes_used
);
2600 btrfs_set_stack_device_io_align(dev_item
, dev
->io_align
);
2601 btrfs_set_stack_device_io_width(dev_item
, dev
->io_width
);
2602 btrfs_set_stack_device_sector_size(dev_item
, dev
->sector_size
);
2603 memcpy(dev_item
->uuid
, dev
->uuid
, BTRFS_UUID_SIZE
);
2604 memcpy(dev_item
->fsid
, dev
->fs_devices
->fsid
, BTRFS_UUID_SIZE
);
2606 flags
= btrfs_super_flags(sb
);
2607 btrfs_set_super_flags(sb
, flags
| BTRFS_HEADER_FLAG_WRITTEN
);
2609 ret
= write_dev_supers(dev
, sb
, do_barriers
, 0, max_mirrors
);
2613 if (total_errors
> max_errors
) {
2614 printk(KERN_ERR
"btrfs: %d errors while writing supers\n",
2620 list_for_each_entry_rcu(dev
, head
, dev_list
) {
2623 if (!dev
->in_fs_metadata
|| !dev
->writeable
)
2626 ret
= write_dev_supers(dev
, sb
, do_barriers
, 1, max_mirrors
);
2630 mutex_unlock(&root
->fs_info
->fs_devices
->device_list_mutex
);
2631 if (total_errors
> max_errors
) {
2632 printk(KERN_ERR
"btrfs: %d errors while writing supers\n",
2639 int write_ctree_super(struct btrfs_trans_handle
*trans
,
2640 struct btrfs_root
*root
, int max_mirrors
)
2644 ret
= write_all_supers(root
, max_mirrors
);
2648 int btrfs_free_fs_root(struct btrfs_fs_info
*fs_info
, struct btrfs_root
*root
)
2650 spin_lock(&fs_info
->fs_roots_radix_lock
);
2651 radix_tree_delete(&fs_info
->fs_roots_radix
,
2652 (unsigned long)root
->root_key
.objectid
);
2653 spin_unlock(&fs_info
->fs_roots_radix_lock
);
2655 if (btrfs_root_refs(&root
->root_item
) == 0)
2656 synchronize_srcu(&fs_info
->subvol_srcu
);
2658 __btrfs_remove_free_space_cache(root
->free_ino_pinned
);
2659 __btrfs_remove_free_space_cache(root
->free_ino_ctl
);
2664 static void free_fs_root(struct btrfs_root
*root
)
2666 iput(root
->cache_inode
);
2667 WARN_ON(!RB_EMPTY_ROOT(&root
->inode_tree
));
2669 free_anon_bdev(root
->anon_dev
);
2670 free_extent_buffer(root
->node
);
2671 free_extent_buffer(root
->commit_root
);
2672 kfree(root
->free_ino_ctl
);
2673 kfree(root
->free_ino_pinned
);
2678 static int del_fs_roots(struct btrfs_fs_info
*fs_info
)
2681 struct btrfs_root
*gang
[8];
2684 while (!list_empty(&fs_info
->dead_roots
)) {
2685 gang
[0] = list_entry(fs_info
->dead_roots
.next
,
2686 struct btrfs_root
, root_list
);
2687 list_del(&gang
[0]->root_list
);
2689 if (gang
[0]->in_radix
) {
2690 btrfs_free_fs_root(fs_info
, gang
[0]);
2692 free_extent_buffer(gang
[0]->node
);
2693 free_extent_buffer(gang
[0]->commit_root
);
2699 ret
= radix_tree_gang_lookup(&fs_info
->fs_roots_radix
,
2704 for (i
= 0; i
< ret
; i
++)
2705 btrfs_free_fs_root(fs_info
, gang
[i
]);
2710 int btrfs_cleanup_fs_roots(struct btrfs_fs_info
*fs_info
)
2712 u64 root_objectid
= 0;
2713 struct btrfs_root
*gang
[8];
2718 ret
= radix_tree_gang_lookup(&fs_info
->fs_roots_radix
,
2719 (void **)gang
, root_objectid
,
2724 root_objectid
= gang
[ret
- 1]->root_key
.objectid
+ 1;
2725 for (i
= 0; i
< ret
; i
++) {
2728 root_objectid
= gang
[i
]->root_key
.objectid
;
2729 err
= btrfs_orphan_cleanup(gang
[i
]);
2738 int btrfs_commit_super(struct btrfs_root
*root
)
2740 struct btrfs_trans_handle
*trans
;
2743 mutex_lock(&root
->fs_info
->cleaner_mutex
);
2744 btrfs_run_delayed_iputs(root
);
2745 btrfs_clean_old_snapshots(root
);
2746 mutex_unlock(&root
->fs_info
->cleaner_mutex
);
2748 /* wait until ongoing cleanup work done */
2749 down_write(&root
->fs_info
->cleanup_work_sem
);
2750 up_write(&root
->fs_info
->cleanup_work_sem
);
2752 trans
= btrfs_join_transaction(root
);
2754 return PTR_ERR(trans
);
2755 ret
= btrfs_commit_transaction(trans
, root
);
2757 /* run commit again to drop the original snapshot */
2758 trans
= btrfs_join_transaction(root
);
2760 return PTR_ERR(trans
);
2761 btrfs_commit_transaction(trans
, root
);
2762 ret
= btrfs_write_and_wait_transaction(NULL
, root
);
2765 ret
= write_ctree_super(NULL
, root
, 0);
2769 int close_ctree(struct btrfs_root
*root
)
2771 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2774 fs_info
->closing
= 1;
2777 btrfs_scrub_cancel(root
);
2779 /* wait for any defraggers to finish */
2780 wait_event(fs_info
->transaction_wait
,
2781 (atomic_read(&fs_info
->defrag_running
) == 0));
2783 /* clear out the rbtree of defraggable inodes */
2784 btrfs_run_defrag_inodes(root
->fs_info
);
2787 * Here come 2 situations when btrfs is broken to flip readonly:
2789 * 1. when btrfs flips readonly somewhere else before
2790 * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
2791 * and btrfs will skip to write sb directly to keep
2792 * ERROR state on disk.
2794 * 2. when btrfs flips readonly just in btrfs_commit_super,
2795 * and in such case, btrfs cannot write sb via btrfs_commit_super,
2796 * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
2797 * btrfs will cleanup all FS resources first and write sb then.
2799 if (!(fs_info
->sb
->s_flags
& MS_RDONLY
)) {
2800 ret
= btrfs_commit_super(root
);
2802 printk(KERN_ERR
"btrfs: commit super ret %d\n", ret
);
2805 if (fs_info
->fs_state
& BTRFS_SUPER_FLAG_ERROR
) {
2806 ret
= btrfs_error_commit_super(root
);
2808 printk(KERN_ERR
"btrfs: commit super ret %d\n", ret
);
2811 btrfs_put_block_group_cache(fs_info
);
2813 kthread_stop(root
->fs_info
->transaction_kthread
);
2814 kthread_stop(root
->fs_info
->cleaner_kthread
);
2816 fs_info
->closing
= 2;
2819 if (fs_info
->delalloc_bytes
) {
2820 printk(KERN_INFO
"btrfs: at unmount delalloc count %llu\n",
2821 (unsigned long long)fs_info
->delalloc_bytes
);
2823 if (fs_info
->total_ref_cache_size
) {
2824 printk(KERN_INFO
"btrfs: at umount reference cache size %llu\n",
2825 (unsigned long long)fs_info
->total_ref_cache_size
);
2828 free_extent_buffer(fs_info
->extent_root
->node
);
2829 free_extent_buffer(fs_info
->extent_root
->commit_root
);
2830 free_extent_buffer(fs_info
->tree_root
->node
);
2831 free_extent_buffer(fs_info
->tree_root
->commit_root
);
2832 free_extent_buffer(root
->fs_info
->chunk_root
->node
);
2833 free_extent_buffer(root
->fs_info
->chunk_root
->commit_root
);
2834 free_extent_buffer(root
->fs_info
->dev_root
->node
);
2835 free_extent_buffer(root
->fs_info
->dev_root
->commit_root
);
2836 free_extent_buffer(root
->fs_info
->csum_root
->node
);
2837 free_extent_buffer(root
->fs_info
->csum_root
->commit_root
);
2839 btrfs_free_block_groups(root
->fs_info
);
2841 del_fs_roots(fs_info
);
2843 iput(fs_info
->btree_inode
);
2845 btrfs_stop_workers(&fs_info
->generic_worker
);
2846 btrfs_stop_workers(&fs_info
->fixup_workers
);
2847 btrfs_stop_workers(&fs_info
->delalloc_workers
);
2848 btrfs_stop_workers(&fs_info
->workers
);
2849 btrfs_stop_workers(&fs_info
->endio_workers
);
2850 btrfs_stop_workers(&fs_info
->endio_meta_workers
);
2851 btrfs_stop_workers(&fs_info
->endio_meta_write_workers
);
2852 btrfs_stop_workers(&fs_info
->endio_write_workers
);
2853 btrfs_stop_workers(&fs_info
->endio_freespace_worker
);
2854 btrfs_stop_workers(&fs_info
->submit_workers
);
2855 btrfs_stop_workers(&fs_info
->delayed_workers
);
2856 btrfs_stop_workers(&fs_info
->caching_workers
);
2858 btrfs_close_devices(fs_info
->fs_devices
);
2859 btrfs_mapping_tree_free(&fs_info
->mapping_tree
);
2861 bdi_destroy(&fs_info
->bdi
);
2862 cleanup_srcu_struct(&fs_info
->subvol_srcu
);
2864 free_fs_info(fs_info
);
2869 int btrfs_buffer_uptodate(struct extent_buffer
*buf
, u64 parent_transid
)
2872 struct inode
*btree_inode
= buf
->first_page
->mapping
->host
;
2874 ret
= extent_buffer_uptodate(&BTRFS_I(btree_inode
)->io_tree
, buf
,
2879 ret
= verify_parent_transid(&BTRFS_I(btree_inode
)->io_tree
, buf
,
2884 int btrfs_set_buffer_uptodate(struct extent_buffer
*buf
)
2886 struct inode
*btree_inode
= buf
->first_page
->mapping
->host
;
2887 return set_extent_buffer_uptodate(&BTRFS_I(btree_inode
)->io_tree
,
2891 void btrfs_mark_buffer_dirty(struct extent_buffer
*buf
)
2893 struct btrfs_root
*root
= BTRFS_I(buf
->first_page
->mapping
->host
)->root
;
2894 u64 transid
= btrfs_header_generation(buf
);
2895 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
2898 btrfs_assert_tree_locked(buf
);
2899 if (transid
!= root
->fs_info
->generation
) {
2900 printk(KERN_CRIT
"btrfs transid mismatch buffer %llu, "
2901 "found %llu running %llu\n",
2902 (unsigned long long)buf
->start
,
2903 (unsigned long long)transid
,
2904 (unsigned long long)root
->fs_info
->generation
);
2907 was_dirty
= set_extent_buffer_dirty(&BTRFS_I(btree_inode
)->io_tree
,
2910 spin_lock(&root
->fs_info
->delalloc_lock
);
2911 root
->fs_info
->dirty_metadata_bytes
+= buf
->len
;
2912 spin_unlock(&root
->fs_info
->delalloc_lock
);
2916 void btrfs_btree_balance_dirty(struct btrfs_root
*root
, unsigned long nr
)
2919 * looks as though older kernels can get into trouble with
2920 * this code, they end up stuck in balance_dirty_pages forever
2923 unsigned long thresh
= 32 * 1024 * 1024;
2925 if (current
->flags
& PF_MEMALLOC
)
2928 btrfs_balance_delayed_items(root
);
2930 num_dirty
= root
->fs_info
->dirty_metadata_bytes
;
2932 if (num_dirty
> thresh
) {
2933 balance_dirty_pages_ratelimited_nr(
2934 root
->fs_info
->btree_inode
->i_mapping
, 1);
2939 void __btrfs_btree_balance_dirty(struct btrfs_root
*root
, unsigned long nr
)
2942 * looks as though older kernels can get into trouble with
2943 * this code, they end up stuck in balance_dirty_pages forever
2946 unsigned long thresh
= 32 * 1024 * 1024;
2948 if (current
->flags
& PF_MEMALLOC
)
2951 num_dirty
= root
->fs_info
->dirty_metadata_bytes
;
2953 if (num_dirty
> thresh
) {
2954 balance_dirty_pages_ratelimited_nr(
2955 root
->fs_info
->btree_inode
->i_mapping
, 1);
2960 int btrfs_read_buffer(struct extent_buffer
*buf
, u64 parent_transid
)
2962 struct btrfs_root
*root
= BTRFS_I(buf
->first_page
->mapping
->host
)->root
;
2964 ret
= btree_read_extent_buffer_pages(root
, buf
, 0, parent_transid
);
2966 set_bit(EXTENT_BUFFER_UPTODATE
, &buf
->bflags
);
2970 static int btree_lock_page_hook(struct page
*page
, void *data
,
2971 void (*flush_fn
)(void *))
2973 struct inode
*inode
= page
->mapping
->host
;
2974 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
2975 struct extent_io_tree
*io_tree
= &BTRFS_I(inode
)->io_tree
;
2976 struct extent_buffer
*eb
;
2978 u64 bytenr
= page_offset(page
);
2980 if (page
->private == EXTENT_PAGE_PRIVATE
)
2983 len
= page
->private >> 2;
2984 eb
= find_extent_buffer(io_tree
, bytenr
, len
);
2988 if (!btrfs_try_tree_write_lock(eb
)) {
2990 btrfs_tree_lock(eb
);
2992 btrfs_set_header_flag(eb
, BTRFS_HEADER_FLAG_WRITTEN
);
2994 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY
, &eb
->bflags
)) {
2995 spin_lock(&root
->fs_info
->delalloc_lock
);
2996 if (root
->fs_info
->dirty_metadata_bytes
>= eb
->len
)
2997 root
->fs_info
->dirty_metadata_bytes
-= eb
->len
;
3000 spin_unlock(&root
->fs_info
->delalloc_lock
);
3003 btrfs_tree_unlock(eb
);
3004 free_extent_buffer(eb
);
3006 if (!trylock_page(page
)) {
3013 static void btrfs_check_super_valid(struct btrfs_fs_info
*fs_info
,
3019 if (fs_info
->fs_state
& BTRFS_SUPER_FLAG_ERROR
)
3020 printk(KERN_WARNING
"warning: mount fs with errors, "
3021 "running btrfsck is recommended\n");
3024 int btrfs_error_commit_super(struct btrfs_root
*root
)
3028 mutex_lock(&root
->fs_info
->cleaner_mutex
);
3029 btrfs_run_delayed_iputs(root
);
3030 mutex_unlock(&root
->fs_info
->cleaner_mutex
);
3032 down_write(&root
->fs_info
->cleanup_work_sem
);
3033 up_write(&root
->fs_info
->cleanup_work_sem
);
3035 /* cleanup FS via transaction */
3036 btrfs_cleanup_transaction(root
);
3038 ret
= write_ctree_super(NULL
, root
, 0);
3043 static int btrfs_destroy_ordered_operations(struct btrfs_root
*root
)
3045 struct btrfs_inode
*btrfs_inode
;
3046 struct list_head splice
;
3048 INIT_LIST_HEAD(&splice
);
3050 mutex_lock(&root
->fs_info
->ordered_operations_mutex
);
3051 spin_lock(&root
->fs_info
->ordered_extent_lock
);
3053 list_splice_init(&root
->fs_info
->ordered_operations
, &splice
);
3054 while (!list_empty(&splice
)) {
3055 btrfs_inode
= list_entry(splice
.next
, struct btrfs_inode
,
3056 ordered_operations
);
3058 list_del_init(&btrfs_inode
->ordered_operations
);
3060 btrfs_invalidate_inodes(btrfs_inode
->root
);
3063 spin_unlock(&root
->fs_info
->ordered_extent_lock
);
3064 mutex_unlock(&root
->fs_info
->ordered_operations_mutex
);
3069 static int btrfs_destroy_ordered_extents(struct btrfs_root
*root
)
3071 struct list_head splice
;
3072 struct btrfs_ordered_extent
*ordered
;
3073 struct inode
*inode
;
3075 INIT_LIST_HEAD(&splice
);
3077 spin_lock(&root
->fs_info
->ordered_extent_lock
);
3079 list_splice_init(&root
->fs_info
->ordered_extents
, &splice
);
3080 while (!list_empty(&splice
)) {
3081 ordered
= list_entry(splice
.next
, struct btrfs_ordered_extent
,
3084 list_del_init(&ordered
->root_extent_list
);
3085 atomic_inc(&ordered
->refs
);
3087 /* the inode may be getting freed (in sys_unlink path). */
3088 inode
= igrab(ordered
->inode
);
3090 spin_unlock(&root
->fs_info
->ordered_extent_lock
);
3094 atomic_set(&ordered
->refs
, 1);
3095 btrfs_put_ordered_extent(ordered
);
3097 spin_lock(&root
->fs_info
->ordered_extent_lock
);
3100 spin_unlock(&root
->fs_info
->ordered_extent_lock
);
3105 static int btrfs_destroy_delayed_refs(struct btrfs_transaction
*trans
,
3106 struct btrfs_root
*root
)
3108 struct rb_node
*node
;
3109 struct btrfs_delayed_ref_root
*delayed_refs
;
3110 struct btrfs_delayed_ref_node
*ref
;
3113 delayed_refs
= &trans
->delayed_refs
;
3115 spin_lock(&delayed_refs
->lock
);
3116 if (delayed_refs
->num_entries
== 0) {
3117 spin_unlock(&delayed_refs
->lock
);
3118 printk(KERN_INFO
"delayed_refs has NO entry\n");
3122 node
= rb_first(&delayed_refs
->root
);
3124 ref
= rb_entry(node
, struct btrfs_delayed_ref_node
, rb_node
);
3125 node
= rb_next(node
);
3128 rb_erase(&ref
->rb_node
, &delayed_refs
->root
);
3129 delayed_refs
->num_entries
--;
3131 atomic_set(&ref
->refs
, 1);
3132 if (btrfs_delayed_ref_is_head(ref
)) {
3133 struct btrfs_delayed_ref_head
*head
;
3135 head
= btrfs_delayed_node_to_head(ref
);
3136 mutex_lock(&head
->mutex
);
3137 kfree(head
->extent_op
);
3138 delayed_refs
->num_heads
--;
3139 if (list_empty(&head
->cluster
))
3140 delayed_refs
->num_heads_ready
--;
3141 list_del_init(&head
->cluster
);
3142 mutex_unlock(&head
->mutex
);
3145 spin_unlock(&delayed_refs
->lock
);
3146 btrfs_put_delayed_ref(ref
);
3149 spin_lock(&delayed_refs
->lock
);
3152 spin_unlock(&delayed_refs
->lock
);
3157 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction
*t
)
3159 struct btrfs_pending_snapshot
*snapshot
;
3160 struct list_head splice
;
3162 INIT_LIST_HEAD(&splice
);
3164 list_splice_init(&t
->pending_snapshots
, &splice
);
3166 while (!list_empty(&splice
)) {
3167 snapshot
= list_entry(splice
.next
,
3168 struct btrfs_pending_snapshot
,
3171 list_del_init(&snapshot
->list
);
3179 static int btrfs_destroy_delalloc_inodes(struct btrfs_root
*root
)
3181 struct btrfs_inode
*btrfs_inode
;
3182 struct list_head splice
;
3184 INIT_LIST_HEAD(&splice
);
3186 spin_lock(&root
->fs_info
->delalloc_lock
);
3187 list_splice_init(&root
->fs_info
->delalloc_inodes
, &splice
);
3189 while (!list_empty(&splice
)) {
3190 btrfs_inode
= list_entry(splice
.next
, struct btrfs_inode
,
3193 list_del_init(&btrfs_inode
->delalloc_inodes
);
3195 btrfs_invalidate_inodes(btrfs_inode
->root
);
3198 spin_unlock(&root
->fs_info
->delalloc_lock
);
3203 static int btrfs_destroy_marked_extents(struct btrfs_root
*root
,
3204 struct extent_io_tree
*dirty_pages
,
3209 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
3210 struct extent_buffer
*eb
;
3214 unsigned long index
;
3217 ret
= find_first_extent_bit(dirty_pages
, start
, &start
, &end
,
3222 clear_extent_bits(dirty_pages
, start
, end
, mark
, GFP_NOFS
);
3223 while (start
<= end
) {
3224 index
= start
>> PAGE_CACHE_SHIFT
;
3225 start
= (u64
)(index
+ 1) << PAGE_CACHE_SHIFT
;
3226 page
= find_get_page(btree_inode
->i_mapping
, index
);
3229 offset
= page_offset(page
);
3231 spin_lock(&dirty_pages
->buffer_lock
);
3232 eb
= radix_tree_lookup(
3233 &(&BTRFS_I(page
->mapping
->host
)->io_tree
)->buffer
,
3234 offset
>> PAGE_CACHE_SHIFT
);
3235 spin_unlock(&dirty_pages
->buffer_lock
);
3237 ret
= test_and_clear_bit(EXTENT_BUFFER_DIRTY
,
3239 atomic_set(&eb
->refs
, 1);
3241 if (PageWriteback(page
))
3242 end_page_writeback(page
);
3245 if (PageDirty(page
)) {
3246 clear_page_dirty_for_io(page
);
3247 spin_lock_irq(&page
->mapping
->tree_lock
);
3248 radix_tree_tag_clear(&page
->mapping
->page_tree
,
3250 PAGECACHE_TAG_DIRTY
);
3251 spin_unlock_irq(&page
->mapping
->tree_lock
);
3254 page
->mapping
->a_ops
->invalidatepage(page
, 0);
3262 static int btrfs_destroy_pinned_extent(struct btrfs_root
*root
,
3263 struct extent_io_tree
*pinned_extents
)
3265 struct extent_io_tree
*unpin
;
3270 unpin
= pinned_extents
;
3272 ret
= find_first_extent_bit(unpin
, 0, &start
, &end
,
3278 if (btrfs_test_opt(root
, DISCARD
))
3279 ret
= btrfs_error_discard_extent(root
, start
,
3283 clear_extent_dirty(unpin
, start
, end
, GFP_NOFS
);
3284 btrfs_error_unpin_extent_range(root
, start
, end
);
3291 static int btrfs_cleanup_transaction(struct btrfs_root
*root
)
3293 struct btrfs_transaction
*t
;
3298 mutex_lock(&root
->fs_info
->transaction_kthread_mutex
);
3300 spin_lock(&root
->fs_info
->trans_lock
);
3301 list_splice_init(&root
->fs_info
->trans_list
, &list
);
3302 root
->fs_info
->trans_no_join
= 1;
3303 spin_unlock(&root
->fs_info
->trans_lock
);
3305 while (!list_empty(&list
)) {
3306 t
= list_entry(list
.next
, struct btrfs_transaction
, list
);
3310 btrfs_destroy_ordered_operations(root
);
3312 btrfs_destroy_ordered_extents(root
);
3314 btrfs_destroy_delayed_refs(t
, root
);
3316 btrfs_block_rsv_release(root
,
3317 &root
->fs_info
->trans_block_rsv
,
3318 t
->dirty_pages
.dirty_bytes
);
3320 /* FIXME: cleanup wait for commit */
3323 if (waitqueue_active(&root
->fs_info
->transaction_blocked_wait
))
3324 wake_up(&root
->fs_info
->transaction_blocked_wait
);
3327 if (waitqueue_active(&root
->fs_info
->transaction_wait
))
3328 wake_up(&root
->fs_info
->transaction_wait
);
3331 if (waitqueue_active(&t
->commit_wait
))
3332 wake_up(&t
->commit_wait
);
3334 btrfs_destroy_pending_snapshots(t
);
3336 btrfs_destroy_delalloc_inodes(root
);
3338 spin_lock(&root
->fs_info
->trans_lock
);
3339 root
->fs_info
->running_transaction
= NULL
;
3340 spin_unlock(&root
->fs_info
->trans_lock
);
3342 btrfs_destroy_marked_extents(root
, &t
->dirty_pages
,
3345 btrfs_destroy_pinned_extent(root
,
3346 root
->fs_info
->pinned_extents
);
3348 atomic_set(&t
->use_count
, 0);
3349 list_del_init(&t
->list
);
3350 memset(t
, 0, sizeof(*t
));
3351 kmem_cache_free(btrfs_transaction_cachep
, t
);
3354 spin_lock(&root
->fs_info
->trans_lock
);
3355 root
->fs_info
->trans_no_join
= 0;
3356 spin_unlock(&root
->fs_info
->trans_lock
);
3357 mutex_unlock(&root
->fs_info
->transaction_kthread_mutex
);
3362 static struct extent_io_ops btree_extent_io_ops
= {
3363 .write_cache_pages_lock_hook
= btree_lock_page_hook
,
3364 .readpage_end_io_hook
= btree_readpage_end_io_hook
,
3365 .submit_bio_hook
= btree_submit_bio_hook
,
3366 /* note we're sharing with inode.c for the merge bio hook */
3367 .merge_bio_hook
= btrfs_merge_bio_hook
,