/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include <linux/workqueue.h>
#include <linux/kthread.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "ref-cache.h"

static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
		printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
		       (unsigned long long)extent_buffer_blocknr(buf),
		       (unsigned long long)btrfs_header_blocknr(buf));
	}
	return 0;
}

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);

struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_hook;
	int rw;
	int mirror_num;
	struct btrfs_work work;
};

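/*
 * The btree inode covers the whole metadata address space, so its get_extent
 * callback hands back a mapping that points every offset at the latest bdev
 * of the fs_devices list.
 */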
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
				    size_t page_offset, u64 start, u64 len,
				    int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		spin_unlock(&em_tree->lock);
		goto out;
	}
	spin_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		printk("failed to insert %Lu %Lu -> %Lu into tree\n",
		       em->start, em->len, em->block_start);
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (em) {
			printk("after failing, found %Lu %Lu %Lu\n",
			       em->start, em->len, em->block_start);
			ret = 0;
		} else {
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
			if (em) {
				printk("double failure lookup gives us "
				       "%Lu %Lu -> %Lu\n", em->start,
				       em->len, em->block_start);
				free_extent_map(em);
			}
			ret = -EIO;
		}
	} else if (ret) {
		free_extent_map(em);
		em = NULL;
	}
	spin_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}

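/*
 * compute the crc for a tree block and either verify it against the bytes
 * stored in the block header (verify != 0) or write the fresh csum into the
 * header (verify == 0).
 */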
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	char result[BTRFS_CRC32_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);
		if (err) {
			printk("failed to map extent buffer! %lu\n",
			       offset);
			return 1;
		}
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	btrfs_csum_final(crc, result);

	if (verify) {
		/* FIXME, this is not good */
		if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, BTRFS_CRC32_SIZE);

			read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
			printk("btrfs: %s checksum verify failed on %llu "
			       "wanted %X found %X level %d\n",
			       root->fs_info->sb->s_id,
			       buf->start, val, found, btrfs_header_level(buf));
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
	}
	return 0;
}

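/*
 * compare the generation stored in an extent buffer's header with the
 * transid the parent node expected; a mismatch means we read a stale copy
 * and the buffer is marked not uptodate again.
 */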
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
{
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk("parent transid verify failed on %llu wanted %llu found %llu\n",
	       (unsigned long long)eb->start,
	       (unsigned long long)parent_transid,
	       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(io_tree, eb);
out:
	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
		      GFP_NOFS);
	return ret;
}

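/*
 * read the pages of an extent buffer, verifying both checksums and the
 * parent transid and retrying the remaining mirrors when the first copy
 * fails.
 */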
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;

	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return ret;

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;
	}
	return -EIO;
}

int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
		       start, found_start, len);
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		goto err;
	}
	if (!PageUptodate(page)) {
		printk("csum not up to date page %lu\n", page->index);
		goto err;
	}
	found_level = btrfs_header_level(eb);

	spin_lock(&root->fs_info->hash_lock);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
	spin_unlock(&root->fs_info->hash_lock);
	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}

static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	csum_dirty_buffer(root, page);
	return 0;
}

int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		ret = -EIO;
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		ret = -EIO;
		goto err;
	}
	if (memcmp_extent_buffer(eb, root->fs_info->fsid,
				 (unsigned long)btrfs_header_fsid(eb),
				 BTRFS_FSID_SIZE)) {
		printk("bad fsid on block %Lu\n", eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	ret = csum_tree_block(root, eb, 1);
	if (ret)
		ret = -EIO;

	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
err:
	free_extent_buffer(eb);
out:
	return ret;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
#else
static int end_workqueue_bio(struct bio *bio,
			     unsigned int bytes_done, int err)
#endif
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;
	if (bio->bi_rw & (1 << BIO_RW))
		btrfs_queue_worker(&fs_info->endio_write_workers,
				   &end_io_wq->work);
	else
		btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

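/*
 * wrap a bio's completion so that it is handed to the endio worker threads
 * instead of running in interrupt context.
 */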
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

static unsigned long async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
	return atomic_read(&info->nr_async_bios) > async_submit_limit(info);
}

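/*
 * worker-thread side of the async bio submission path: call the real
 * submit_bio_hook and wake anyone throttled in btrfs_wq_submit_bio.
 */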
static void run_one_async_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	atomic_dec(&fs_info->nr_async_submits);

	if (atomic_read(&fs_info->nr_async_submits) < limit)
		wake_up(&fs_info->async_submit_wait);

	async->submit_bio_hook(async->inode, async->rw, async->bio,
			       async->mirror_num);
	kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			extent_submit_bio_hook_t *submit_bio_hook)
{
	struct async_submit_bio *async;
	int limit = async_submit_limit(fs_info);

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_hook = submit_bio_hook;
	async->work.func = run_one_async_submit;
	async->work.flags = 0;
	atomic_inc(&fs_info->nr_async_submits);
	btrfs_queue_worker(&fs_info->workers, &async->work);

	wait_event_timeout(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) < limit),
			   HZ/10);
	return 0;
}

static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 offset;
	int ret;

	offset = bio->bi_sector << 9;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	if (rw & (1 << BIO_RW)) {
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 1);
	}

	/*
	 * called for a read, do the setup so that checksum validation
	 * can happen in the async kernel threads
	 */
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);

	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num)
{
	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	if (!(rw & (1 << BIO_RW))) {
		return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
	}
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   __btree_submit_bio_hook);
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	return extent_write_full_page(tree, page, btree_get_extent, wbc);
}

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		u64 num_dirty;
		u64 start = 0;
		unsigned long thresh = 8 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		num_dirty = count_range_bits(tree, &start, (u64)-1,
					     thresh, EXTENT_DIRTY);
		if (num_dirty < thresh) {
			return 0;
		}
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;

	ret = try_release_extent_state(map, tree, page, gfp_flags);
	if (!ret)
		return 0;

	ret = try_release_extent_buffer(tree, page);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk("warning page private not zero on page %Lu\n",
		       page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
	.sync_page	= block_sync_page,
};

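/*
 * start a readahead of a single tree block; the read result is ignored
 * because this is only an optimization.
 */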
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
	return eb;
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	int ret;

	io_tree = &BTRFS_I(btree_inode)->io_tree;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0)
		buf->flags |= EXTENT_UPTODATE;
	return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		WARN_ON(!btrfs_tree_locked(buf));
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	}
	return 0;
}

int wait_on_tree_block_writeback(struct btrfs_root *root,
				 struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
					buf);
	return 0;
}

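/*
 * fill in the fields of an in-memory btrfs_root before it is read from disk
 * or linked into the fs_info radix tree.
 */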
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->ref_tree = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->track_dirty = 0;

	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_inode = 0;
	root->last_inode_alloc = 0;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->orphan_list);
	INIT_LIST_HEAD(&root->dead_list);
	spin_lock_init(&root->node_lock);
	spin_lock_init(&root->list_lock);
	mutex_init(&root->objectid_mutex);

	btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
	root->ref_tree = &root->ref_tree_struct;

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->defrag_level = 0;
	root->root_key.objectid = objectid;
	return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	if (ret)
		return ret;

	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
	return 0;
}

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 highest_inode;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	l = path->nodes[0];
	read_extent_buffer(l, &root->root_item,
			   btrfs_item_ptr_offset(l, path->slots[0]),
			   sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
insert:
	ret = btrfs_find_highest_inode(root, &highest_inode);
	if (ret == 0) {
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
	}
	return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_root *root;

	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
	return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info, location);
	if (IS_ERR(root))
		return root;
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid, root);
	return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
{
	struct btrfs_root *root;
	int ret;

	root = btrfs_read_fs_root_no_name(fs_info, location);
	if (!root)
		return NULL;
	if (IS_ERR(root))
		return root;

	ret = btrfs_set_root_name(root, name, namelen);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}

	ret = btrfs_sysfs_add_root(root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root->name);
		kfree(root);
		return ERR_PTR(ret);
	}
	return root;
}

static int add_hasher(struct btrfs_fs_info *info, char *type)
{
	struct btrfs_hasher *hasher;

	hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
	if (!hasher)
		return -ENOMEM;
	hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
	if (!hasher->hash_tfm) {
		kfree(hasher);
		return -EINVAL;
	}
	spin_lock(&info->hash_lock);
	list_add(&hasher->list, &info->hashers);
	spin_unlock(&info->hash_lock);
	return 0;
}

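/*
 * backing_dev_info congestion callback: the filesystem is considered
 * congested if the async submit queue is backed up or if any underlying
 * device reports congestion.
 */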
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct list_head *cur;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	if ((bdi_bits & (1 << BDI_write_congested)) &&
	    btrfs_congested_async(info, 0))
		return 1;

	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct list_head *cur;
	struct btrfs_device *device;
	struct btrfs_fs_info *info;

	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn) {
			bdi->unplug_io_fn(bdi, page);
		}
	}
}

void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct inode *inode;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct address_space *mapping;
	u64 offset;

	/* the generic O_DIRECT read code does this */
	if (!page) {
		__unplug_io_fn(bdi, page);
		return;
	}

	/*
	 * page->mapping may change at any time.  Get a consistent copy
	 * and use that for everything below
	 */
	mapping = page->mapping;
	if (!mapping)
		return;

	inode = mapping->host;
	offset = page_offset(page);

	em_tree = &BTRFS_I(inode)->extent_tree;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);
	if (!em) {
		__unplug_io_fn(bdi, page);
		return;
	}

	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		free_extent_map(em);
		__unplug_io_fn(bdi, page);
		return;
	}
	offset = offset - em->start;
	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
			  em->block_start + offset, page);
	free_extent_map(em);
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_init(bdi);
#endif
	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->capabilities = default_backing_dev_info.capabilities;
	bdi->unplug_io_fn = btrfs_unplug_io_fn;
	bdi->unplug_io_data = info;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}

static int bio_ready_for_csum(struct bio *bio)
{
	u64 length = 0;
	u64 buf_len = 0;
	u64 start = 0;
	struct page *page;
	struct extent_io_tree *io_tree = NULL;
	struct btrfs_fs_info *info = NULL;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
			continue;
		}
		if (!page->private) {
			length += bvec->bv_len;
			continue;
		}
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
		info = BTRFS_I(page->mapping->host)->root->fs_info;
	}
	/* are we fully contained in this bio? */
	if (buf_len <= length)
		return 1;

	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bios are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
		btrfs_queue_worker(&fs_info->endio_workers,
				   &end_io_wq->work);
		return;
	}
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	bio_endio(bio, bio->bi_size, error);
#else
	bio_endio(bio, error);
#endif
}

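/*
 * the cleaner kthread drops old snapshots in the background and stops once
 * the filesystem starts closing.
 */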
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;

	do {
		if (root->fs_info->closing)
			break;

		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->cleaner_mutex);
		btrfs_clean_old_snapshots(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

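/*
 * the transaction kthread periodically commits the running transaction once
 * it is old enough, then sleeps for a while before checking again.
 */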
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	unsigned long now;
	unsigned long delay;
	int ret;

	do {
		if (root->fs_info->closing)
			break;

		delay = HZ * 30;
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
			printk("btrfs: total reference cache size %Lu\n",
			       root->fs_info->total_ref_cache_size);
		}

		mutex_lock(&root->fs_info->trans_mutex);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			mutex_unlock(&root->fs_info->trans_mutex);
			goto sleep;
		}

		now = get_seconds();
		if (now < cur->start_time || now - cur->start_time < 30) {
			mutex_unlock(&root->fs_info->trans_mutex);
			delay = HZ * 5;
			goto sleep;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_commit_transaction(trans, root);
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

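/*
 * open_ctree builds the in-memory fs_info: it reads the super block, starts
 * the worker threads, reads the chunk, device, tree and extent roots, and
 * finally launches the cleaner and transaction kthreads.
 */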
struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
			      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
						GFP_NOFS);
	struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	int ret;
	int err = -EINVAL;
	struct btrfs_super_block *disk_super;

	if (!extent_root || !tree_root || !fs_info) {
		err = -ENOMEM;
		goto fail;
	}
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
	spin_lock_init(&fs_info->hash_lock);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);
	spin_lock_init(&fs_info->ref_cache_lock);

	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->throttles, 0);
	atomic_set(&fs_info->throttle_gen, 0);
	fs_info->sb = sb;
	fs_info->max_extent = (u64)-1;
	fs_info->max_inline = 8192 * 1024;
	setup_bdi(fs_info, &fs_info->bdi);
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;
	fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_extents);
	spin_lock_init(&fs_info->ordered_extent_lock);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);

	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping,
			    GFP_NOFS);
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
			     GFP_NOFS);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	extent_io_tree_init(&fs_info->free_space_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->block_group_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pinned_extents,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pending_del,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->extent_ins,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->do_barriers = 1;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->drop_mutex);
	mutex_init(&fs_info->alloc_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	ret = add_hasher(fs_info, "crc32c");
	if (ret) {
		printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
		err = -ENOMEM;
		goto fail_iput;
	}

	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	bh = __bread(fs_devices->latest_bdev,
		     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh)
		goto fail_iput;

	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_sb_buffer;

	err = btrfs_parse_options(tree_root, options);
	if (err)
		goto fail_sb_buffer;

	/*
	 * we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time, and so it
	 * cannot dynamically grow.
	 */
	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size));

	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;
	fs_info->workers.idle_thresh = 32;

	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_write_workers.idle_thresh = 4;

	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->fixup_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->endio_write_workers,
			    fs_info->thread_pool_size);

	err = -EINVAL;
	if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
		printk("Btrfs: wanted %llu devices, but found %llu\n",
		       (unsigned long long)btrfs_super_num_devices(disk_super),
		       (unsigned long long)fs_devices->open_devices);
		if (btrfs_test_opt(tree_root, DEGRADED))
			printk("continuing in degraded mode\n");
		else
			goto fail_sb_buffer;
	}

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk("btrfs: failed to read the system array on %s\n",
		       sb->s_id);
		goto fail_sys_array;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, 0);
	BUG_ON(!chunk_root->node);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
		 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
		 BTRFS_UUID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_chunk_tree(chunk_root);
	mutex_unlock(&fs_info->chunk_mutex);

	btrfs_close_extra_devices(fs_devices);

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, 0);
	if (!tree_root->node)
		goto fail_sb_buffer;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto fail_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;
	if (ret)
		goto fail_extent_root;

	btrfs_read_block_groups(extent_root);

	fs_info->generation = btrfs_super_generation(disk_super) + 1;
	fs_info->data_alloc_profile = (u64)-1;
	fs_info->metadata_alloc_profile = (u64)-1;
	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (!fs_info->cleaner_kthread)
		goto fail_extent_root;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (!fs_info->transaction_kthread)
		goto fail_cleaner;

	return tree_root;

fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
	free_extent_buffer(extent_root->node);
fail_tree_root:
	free_extent_buffer(tree_root->node);
fail_sys_array:
fail_sb_buffer:
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
	iput(fs_info->btree_inode);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	kfree(extent_root);
	kfree(tree_root);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif
	kfree(fs_info);
	return ERR_PTR(err);
}

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			printk(KERN_WARNING "lost page write due to "
			       "I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

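/*
 * copy the current super block to every writeable device, using barriers
 * when they are enabled, and complain if more devices failed than the
 * filesystem can tolerate.
 */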
int write_all_supers(struct btrfs_root *root)
{
	struct list_head *cur;
	struct list_head *head = &root->fs_info->fs_devices->devices;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	struct buffer_head *bh;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u32 crc;
	u64 flags;

	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);

	sb = &root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;
	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata)
			continue;

		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		crc = ~(u32)0;
		crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);

		bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
			      BTRFS_SUPER_INFO_SIZE);

		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
		dev->pending_io = bh;

		get_bh(bh);
		set_buffer_uptodate(bh);
		lock_buffer(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;

		if (do_barriers && dev->barriers) {
			ret = submit_bh(WRITE_BARRIER, bh);
			if (ret == -EOPNOTSUPP) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				dev->barriers = 0;
				get_bh(bh);
				lock_buffer(bh);
				ret = submit_bh(WRITE, bh);
			}
		} else {
			ret = submit_bh(WRITE, bh);
		}
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}
	total_errors = 0;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata)
			continue;

		BUG_ON(!dev->pending_io);
		bh = dev->pending_io;
		wait_on_buffer(bh);
		if (!buffer_uptodate(dev->pending_io)) {
			if (do_barriers && dev->barriers) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				get_bh(bh);
				lock_buffer(bh);
				dev->barriers = 0;
				bh->b_end_io = btrfs_end_buffer_write_sync;
				ret = submit_bh(WRITE, bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh))
					total_errors++;
			} else {
				total_errors++;
			}
		}
		dev->pending_io = NULL;
		brelse(bh);
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}
	return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	int ret;

	ret = write_all_supers(root);
	return ret;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	btrfs_sysfs_del_root(root);
	if (root->node)
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
	kfree(root->name);
	kfree(root);
	return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
	return 0;
}

int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	fs_info->closing = 1;

	kthread_stop(root->fs_info->transaction_kthread);
	kthread_stop(root->fs_info->cleaner_kthread);

	btrfs_clean_old_snapshots(root);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);

	write_ctree_super(NULL, root);

	if (fs_info->delalloc_bytes) {
		printk("btrfs: at unmount delalloc count %Lu\n",
		       fs_info->delalloc_bytes);
	}
	if (fs_info->total_ref_cache_size) {
		printk("btrfs: at umount reference cache size %Lu\n",
		       fs_info->total_ref_cache_size);
	}

	if (fs_info->extent_root->node)
		free_extent_buffer(fs_info->extent_root->node);

	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);

	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);

	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);

	btrfs_free_block_groups(root->fs_info);
	fs_info->closing = 2;
	del_fs_roots(fs_info);

	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);

	iput(fs_info->btree_inode);

	while (!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
				    hashers);
		list_del(&hasher->hashers);
		crypto_free_hash(&fs_info->hash_tfm);
		kfree(hasher);
	}
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif

	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
	int ret;
	struct inode *btree_inode = buf->first_page->mapping->host;

	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
	if (!ret)
		return ret;
	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid);
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;
	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
					  buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;

	WARN_ON(!btrfs_tree_locked(buf));
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
		       (unsigned long long)buf->start,
		       transid, root->fs_info->generation);
		WARN_ON(1);
	}
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	struct extent_io_tree *tree;
	u64 num_dirty;
	u64 start = 0;
	unsigned long thresh = 12 * 1024 * 1024;
	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

	if (current_is_pdflush())
		return;

	num_dirty = count_range_bits(tree, &start, (u64)-1,
				     thresh, EXTENT_DIRTY);
	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
	return;
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	int ret;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0) {
		buf->flags |= EXTENT_UPTODATE;
	}
	return ret;
}

static struct extent_io_ops btree_extent_io_ops = {
	.writepage_io_hook = btree_writepage_io_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};