/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "free-space-cache.h"
#include "transaction.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
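
/*
 * Editorial sizing note (assuming 4K pages and a 4K sectorsize): one bitmap
 * occupies a page, so BITS_PER_BITMAP is 4096 * 8 = 32768 bits and a single
 * bitmap entry can describe 32768 * 4096 bytes = 128MiB of a block group.
 * MAX_CACHE_BYTES_PER_GIG is the 32KiB-per-GiB memory budget that
 * recalculate_thresholds() below works against.
 */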
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}
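
/*
 * Editorial note on __lookup_free_space_inode() above: masking __GFP_FS out
 * of the mapping's allocation flags keeps page-cache allocations for the
 * space cache inode from recursing into filesystem reclaim while btrfs is
 * holding its own locks -- a deadlock-avoidance measure, not a performance
 * tweak.
 */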
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
			      BTRFS_INODE_PREALLOC);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = root->orphan_block_rsv;
	ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 0, 5, 0);
	if (ret)
		return ret;

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);

	trans->block_rsv = rsv;
	if (ret)
		return ret;

	ret = btrfs_update_inode(trans, root, inode);
	return ret;
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
	return 0;
}
int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct page *page;
	struct btrfs_key key;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	pgoff_t index = 0;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		goto out;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	else if (ret > 0) {
		btrfs_release_path(path);
		ret = 0;
		goto out;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		goto out;
	}

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_free_space_entry *entry;
		struct btrfs_free_space *e;
		void *addr;
		unsigned long offset = 0;

		if (!num_entries && !num_bitmaps)
			break;

		page = find_or_create_page(inode->i_mapping, index, mask);
		if (!page)
			goto free_cache;

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				goto free_cache;
			}
		}

		addr = kmap(page);

		if (index == 0) {
			u64 *gen;

			/*
			 * We put a bogus crc in the front of the first page in
			 * case old kernels try to mount a fs with the new
			 * format to make sure they discard the cache.
			 */
			offset += sizeof(u64);

			gen = addr + offset;
			if (*gen != BTRFS_I(inode)->generation) {
				printk_ratelimited(KERN_ERR "btrfs: space cache"
					" generation (%llu) does not match "
					"inode (%llu)\n",
					(unsigned long long)*gen,
					(unsigned long long)
					BTRFS_I(inode)->generation);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}
			offset += sizeof(u64);
		}
		entry = addr + offset;

		while (num_entries) {
			e = kmem_cache_zalloc(btrfs_free_space_cachep,
					      GFP_NOFS);
			if (!e) {
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			e->offset = le64_to_cpu(entry->offset);
			e->bytes = le64_to_cpu(entry->bytes);
			if (!e->bytes) {
				kunmap(page);
				kmem_cache_free(btrfs_free_space_cachep, e);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				spin_unlock(&ctl->tree_lock);
				if (ret) {
					printk(KERN_ERR "Duplicate entries in "
					       "free space cache, dumping\n");
					kunmap(page);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
			} else {
				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
				if (!e->bitmap) {
					kunmap(page);
					kmem_cache_free(
						btrfs_free_space_cachep, e);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				ctl->total_bitmaps++;
				ctl->op->recalc_thresholds(ctl);
				spin_unlock(&ctl->tree_lock);
				if (ret) {
					printk(KERN_ERR "Duplicate entries in "
					       "free space cache, dumping\n");
					kunmap(page);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				list_add_tail(&e->list, &bitmaps);
			}

			num_entries--;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				break;
			entry++;
		}

		/*
		 * We read an entry out of this page, we need to move on to the
		 * next page.
		 */
		if (num_bitmaps && !num_entries) {
			/*
			 * We add the bitmaps at the end of the entries in
			 * order that the bitmap entries are added to the
			 * cache.
			 */
			e = list_entry(bitmaps.next,
				       struct btrfs_free_space, list);
			list_del_init(&e->list);
			memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
			num_bitmaps--;
		}

		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}

	ret = 1;
out:
	return ret;
free_cache:
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has the wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
/*
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
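/*
 * Editorial sketch of the on-disk layout this writer produces, derived from
 * the code below rather than from separate documentation: the first page
 * starts with two u64s -- a deliberately bogus crc, then the transaction
 * generation -- followed by packed struct btrfs_free_space_entry records
 * (le64 offset, le64 bytes, u8 type).  An entry never straddles a page
 * boundary, and bitmap payloads are written out as whole pages after all of
 * the entries.
 */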
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct page **pages;
	struct page *page;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	u64 bytes = 0;
	u32 crc = ~(u32)0;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int index = 0, num_pages = 0;
	int entries = 0;
	int bitmaps = 0;
	int ret = -1;
	int i;
	bool next_page = false;
	bool out_of_space = false;

	INIT_LIST_HEAD(&bitmap_list);

	node = rb_first(&ctl->free_space_offset);
	if (!node)
		return 0;

	if (!i_size_read(inode))
		return -1;

	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;

	filemap_write_and_wait(inode->i_mapping);
	btrfs_wait_ordered_range(inode, inode->i_size &
				 ~(root->sectorsize - 1), (u64)-1);

	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!pages)
		return -1;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one.
	 */
	unpin = root->fs_info->pinned_extents;

	/*
	 * Lock all pages first so we can lock the extent safely.
	 *
	 * NOTE: Because we hold the ref the entire time we're going to write to
	 * the page find_get_page should never fail, so we don't do a check
	 * after find_get_page at this point.  Just putting this here so people
	 * know and don't freak out.
	 */
	while (index < num_pages) {
		page = find_or_create_page(inode->i_mapping, index, mask);
		if (!page) {
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			goto out;
		}
		pages[index] = page;
		index++;
	}

	index = 0;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	if (block_group)
		start = block_group->key.objectid;

	/* Write out the extent entries */
	do {
		struct btrfs_free_space_entry *entry;
		void *addr, *orig;
		unsigned long offset = 0;

		next_page = false;

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}

		page = pages[index];

		orig = addr = kmap(page);
		if (index == 0) {
			u64 *gen;

			/*
			 * We're going to put in a bogus crc for this page to
			 * make sure that old kernels who aren't aware of this
			 * format will be sure to discard the cache.
			 */
			addr += sizeof(u64);
			offset += sizeof(u64);

			gen = addr;
			*gen = trans->transid;
			addr += sizeof(u64);
			offset += sizeof(u64);
		}
		entry = addr;

		memset(addr, 0, PAGE_CACHE_SIZE - offset);
		while (node && !next_page) {
			struct btrfs_free_space *e;

			e = rb_entry(node, struct btrfs_free_space,
				     offset_index);
			entries++;

			entry->offset = cpu_to_le64(e->offset);
			entry->bytes = cpu_to_le64(e->bytes);
			if (e->bitmap) {
				entry->type = BTRFS_FREE_SPACE_BITMAP;
				list_add_tail(&e->list, &bitmap_list);
				bitmaps++;
			} else {
				entry->type = BTRFS_FREE_SPACE_EXTENT;
			}
			node = rb_next(node);
			if (!node && cluster) {
				node = rb_first(&cluster->root);
				cluster = NULL;
			}
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}

		/*
		 * We want to add any pinned extents to our free space cache
		 * so we don't leak the space
		 */
		while (block_group && !next_page &&
		       (start < block_group->key.objectid +
			block_group->key.offset)) {
			ret = find_first_extent_bit(unpin, start, &start, &end,
						    EXTENT_DIRTY);
			if (ret) {
				ret = 0;
				break;
			}

			/* This pinned extent is out of our range */
			if (start >= block_group->key.objectid +
			    block_group->key.offset)
				break;

			len = block_group->key.objectid +
				block_group->key.offset - start;
			len = min(len, end + 1 - start);

			entries++;
			entry->offset = cpu_to_le64(start);
			entry->bytes = cpu_to_le64(len);
			entry->type = BTRFS_FREE_SPACE_EXTENT;

			start = end + 1;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}

		/* Generate bogus crc value */
		if (index == 0) {
			u32 *tmp;
			crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
					      PAGE_CACHE_SIZE - sizeof(u64));
			btrfs_csum_final(crc, (char *)&crc);
			crc++;
			tmp = orig;
			*tmp = crc;
		}

		kunmap(page);

		bytes += PAGE_CACHE_SIZE;

		index++;
	} while (node || next_page);

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		void *addr;
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}
		page = pages[index];

		addr = kmap(page);
		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
		kunmap(page);
		bytes += PAGE_CACHE_SIZE;

		list_del_init(&entry->list);
		index++;
	}

	if (out_of_space) {
		btrfs_drop_pages(pages, num_pages);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
				     i_size_read(inode) - 1, &cached_state,
				     GFP_NOFS);
		ret = -1;
		goto out;
	}

	/* Zero out the rest of the pages just to make sure */
	while (index < num_pages) {
		void *addr;

		page = pages[index];
		addr = kmap(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		bytes += PAGE_CACHE_SIZE;
		index++;
	}

	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
				bytes, &cached_state);
	btrfs_drop_pages(pages, num_pages);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	if (ret) {
		ret = -1;
		goto out;
	}

	BTRFS_I(inode)->generation = trans->transid;

	filemap_write_and_wait(inode->i_mapping);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		ret = -1;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			ret = -1;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 bytes - 1, EXTENT_DIRTY |
					 EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	ret = 0;
out:
	kfree(pages);
	if (ret) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return ret;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}
911 static inline u64
offset_to_bitmap(struct btrfs_free_space_ctl
*ctl
,
915 u64 bytes_per_bitmap
;
917 bytes_per_bitmap
= BITS_PER_BITMAP
* ctl
->unit
;
918 bitmap_start
= offset
- ctl
->start
;
919 bitmap_start
= div64_u64(bitmap_start
, bytes_per_bitmap
);
920 bitmap_start
*= bytes_per_bitmap
;
921 bitmap_start
+= ctl
->start
;
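
/*
 * Worked example (editorial, assuming 4K pages and sectors, i.e. one bitmap
 * covers 128MiB): with ctl->start = 1GiB, an offset of 1GiB + 200MiB rounds
 * down to the second bitmap window and offset_to_bitmap() returns
 * 1GiB + 128MiB.
 */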
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * this entry logically.
			 */
			if (bitmap) {
				if (info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
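
/*
 * Editorial example of the ordering rule above: if an extent entry and a
 * bitmap entry both start at offset X, an in-order walk of the rbtree visits
 * the extent entry first and the bitmap entry second, so extent allocations
 * win the tie.
 */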
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
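
/*
 * Worked example (editorial, assuming 4K pages): a 4GiB block group gets
 * max_bytes = 4 * 32768 = 128KiB.  With 2 bitmaps in use, bitmap_bytes =
 * (2 + 1) * 4096 = 12KiB, extent_bytes = min(116KiB, 128KiB / 2) = 64KiB,
 * and extents_thresh becomes 65536 / sizeof(struct btrfs_free_space)
 * entries.
 */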
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}
static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
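
/*
 * Editorial usage note: search_bitmap() is an in/out interface.  *offset and
 * *bytes carry the requested start and size in; on success (return 0) they
 * are overwritten with the start and length of the first free run at least
 * as large as the request, and on failure the function returns -1.
 */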
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case somethings not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure its actually there.  If its not there then we need to stop
	 * because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
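
/*
 * Editorial example of the heuristic above (assuming a 4K sectorsize and an
 * extents_thresh of 100): a chunk of 16KiB or less is still stored as a
 * plain extent while no more than half the threshold (50 entries) is in
 * use, a larger chunk stays an extent until all 100 slots are used, and
 * beyond that new space is folded into bitmaps.
 */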
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
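
/*
 * Editorial example for the again: loop above (assuming 128MiB bitmap
 * windows): adding 10MiB of free space that begins 4MiB before a window
 * boundary sets the last 4MiB of bits in the first bitmap, then loops with
 * offset advanced by bytes_added and sets the remaining 6MiB in the next
 * window.
 */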
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
					     struct btrfs_free_space,
					     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			      " trying to use %llu\n",
			      (unsigned long long)info->offset,
			      (unsigned long long)info->bytes,
			      (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(ctl, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
	       "\n", count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		if (list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * First check our cached list of bitmaps and see if there is an entry
	 * here that will work.
	 */
	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * If we do have entries on our list and we are here then we didn't find
	 * anything, so go ahead and get the next entry after the last entry in
	 * this list and start the search from there.
	 */
	if (!list_empty(bitmaps)) {
		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
				   list);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		goto search;
	}

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

search:
	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (!entry->bitmap)
			continue;
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster,
					   offset, bytes, min_bytes);
	} while (ret && node);

	return ret;
}
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct list_head bitmaps;
	struct btrfs_free_space *entry, *tmp;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocates with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			struct btrfs_space_info *space_info;
			int update = 0;

			space_info = block_group->space_info;
			spin_lock(&space_info->lock);
			spin_lock(&block_group->lock);
			if (!block_group->ro) {
				block_group->reserved += bytes;
				space_info->bytes_reserved += bytes;
				update = 1;
			}
			spin_unlock(&block_group->lock);
			spin_unlock(&space_info->lock);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (update) {
				spin_lock(&space_info->lock);
				spin_lock(&block_group->lock);
				if (block_group->ro)
					space_info->bytes_readonly += bytes;
				block_group->reserved -= bytes;
				space_info->bytes_reserved -= bytes;
				spin_unlock(&space_info->lock);
				spin_unlock(&block_group->lock);
			}

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		printk(KERN_ERR "btrfs: failed to load free ino cache for "
		       "root %llu\n", root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		printk(KERN_ERR "btrfs: failed to write free ino cache "
		       "for root %llu\n", root->root_key.objectid);
	}

	iput(inode);
	return ret;
}