/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
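/*
 * Note on the constants above: a bitmap entry occupies one page, so it
 * tracks PAGE_CACHE_SIZE * 8 bits, and each bit represents ctl->unit
 * (the block group's sectorsize) bytes on disk.  MAX_CACHE_BYTES_PER_GIG
 * is the in-memory budget the thresholds below aim for: roughly 32k of
 * tracking structures per 1GiB of block group space.  With 4k pages and
 * 4k sectors (a common configuration, not the only one) a single bitmap
 * covers 32768 * 4k = 128MiB.
 */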
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(root, path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(root, path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}
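/*
 * The wrapper below first checks whether the block group already holds a
 * cached reference to its free space inode (under block_group->lock) and
 * only falls back to the tree search in __lookup_free_space_inode() on a
 * miss.  Unless the filesystem is closing, the result is stashed back in
 * the block group (igrab() plus the iref flag) so later lookups are just
 * a pointer grab.
 */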
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!root->fs_info->closing) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
			      BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(root, path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	return 0;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	loff_t oldsize;
	int ret = 0;

	trans->block_rsv = root->orphan_block_rsv;
	ret = btrfs_block_rsv_check(trans, root,
				    root->orphan_block_rsv,
				    0, 5);
	if (ret)
		return ret;

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		return ret;

	return btrfs_update_inode(trans, root, inode);
}
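/*
 * readahead_cache() just primes the page cache for the whole free space
 * cache file before the parser below starts touching pages one by one;
 * without it every grab_cache_page()/btrfs_readpage() pair would be a
 * synchronous read.
 */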
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
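/*
 * Rough sketch of the on-disk cache layout that the loader below parses
 * (inferred from the offset arithmetic in this file):
 *
 *   page 0:    [u32 crc per page][u64 generation][entry records...]
 *   page 1..n: [entry records...] or [one whole bitmap per page]
 *
 * first_page_offset is the size of the crc array plus the generation, so
 * entry records on page 0 start right after that header.  Every page
 * (minus the header area on page 0) is covered by its own crc32c.  The
 * bitmap payloads come after all the extent/bitmap entry records, one
 * page each, in the order their bitmap entries appeared.
 */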
int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct page *page;
	u32 *checksums = NULL, *crc;
	char *disk_crcs = NULL;
	struct btrfs_key key;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u64 num_checksums;
	u32 cur_crc = ~(u32)0;
	pgoff_t index = 0;
	unsigned long first_page_offset;
	int ret = 0, ret2;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		goto out;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	else if (ret > 0) {
		btrfs_release_path(root, path);
		ret = 0;
		goto out;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(root, path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		goto out;
	}

	if (!num_entries)
		goto out;

	/* Setup everything for doing checksumming */
	num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
	checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
	if (!checksums)
		goto out;
	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
	disk_crcs = kzalloc(first_page_offset, GFP_NOFS);
	if (!disk_crcs)
		goto out;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_free_space_entry *entry;
		struct btrfs_free_space *e;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;
		int need_loop = 0;

		if (!num_entries && !num_bitmaps)
			break;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		page = grab_cache_page(inode->i_mapping, index);
		if (!page)
			goto free_cache;

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				goto free_cache;
			}
		}
		addr = kmap(page);

		if (index == 0) {
			u64 *gen;

			memcpy(disk_crcs, addr, first_page_offset);
			gen = addr + (sizeof(u32) * num_checksums);
			if (*gen != BTRFS_I(inode)->generation) {
				printk(KERN_ERR "btrfs: space cache generation"
				       " (%llu) does not match inode (%llu)\n",
				       (unsigned long long)*gen,
				       (unsigned long long)
				       BTRFS_I(inode)->generation);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}
			crc = (u32 *)disk_crcs;
		}
		entry = addr + start_offset;

		/* First let's check our crc before we do anything fun */
		cur_crc = ~(u32)0;
		cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc,
					  PAGE_CACHE_SIZE - start_offset);
		btrfs_csum_final(cur_crc, (char *)&cur_crc);
		if (cur_crc != *crc) {
			printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
			       index);
			kunmap(page);
			unlock_page(page);
			page_cache_release(page);
			goto free_cache;
		}
		crc++;

		while (1) {
			if (!num_entries)
				break;

			need_loop = 1;
			e = kmem_cache_zalloc(btrfs_free_space_cachep,
					      GFP_NOFS);
			if (!e) {
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			e->offset = le64_to_cpu(entry->offset);
			e->bytes = le64_to_cpu(entry->bytes);
			if (!e->bytes) {
				kunmap(page);
				kmem_cache_free(btrfs_free_space_cachep, e);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				spin_unlock(&ctl->tree_lock);
			} else {
				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
				if (!e->bitmap) {
					kunmap(page);
					kmem_cache_free(
						btrfs_free_space_cachep, e);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				spin_lock(&ctl->tree_lock);
				ret2 = link_free_space(ctl, e);
				ctl->total_bitmaps++;
				ctl->op->recalc_thresholds(ctl);
				spin_unlock(&ctl->tree_lock);
				list_add_tail(&e->list, &bitmaps);
			}

			num_entries--;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				break;
			entry++;
		}

		/*
		 * We read an entry out of this page, we need to move on to the
		 * next page.
		 */
		if (need_loop) {
			kunmap(page);
			goto next;
		}

		/*
		 * We add the bitmaps at the end of the entries in order that
		 * the bitmap entries are added to the cache.
		 */
		e = list_entry(bitmaps.next, struct btrfs_free_space, list);
		list_del_init(&e->list);
		memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
		kunmap(page);
		num_bitmaps--;
next:
		unlock_page(page);
		page_cache_release(page);
		index++;
	}

	ret = 1;
out:
	kfree(checksums);
	kfree(disk_crcs);
	return ret;
free_cache:
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (fs_info->closing)
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has the wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
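/*
 * The writer below is the mirror image of the loader: it pins every page
 * of the cache file, locks the whole extent range, streams out extent
 * entries (including anything still sitting in the block group's cluster
 * and any pinned extents, so that space is not leaked), then the bitmap
 * payloads one page each, checksumming every page as it goes.  The
 * per-page crcs plus the transaction generation land in the header area
 * of page 0, and finally the free space header item in the tree is
 * updated with the entry/bitmap counts and generation.
 */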
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct page **pages;
	struct page *page;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	u64 bytes = 0;
	u32 *crc, *checksums;
	unsigned long first_page_offset;
	int index = 0, num_pages = 0;
	int entries = 0;
	int bitmaps = 0;
	int ret = -1;
	bool next_page = false;
	bool out_of_space = false;

	INIT_LIST_HEAD(&bitmap_list);

	node = rb_first(&ctl->free_space_offset);
	if (!node)
		return 0;

	if (!i_size_read(inode))
		return -1;

	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;

	filemap_write_and_wait(inode->i_mapping);
	btrfs_wait_ordered_range(inode, inode->i_size &
				 ~(root->sectorsize - 1), (u64)-1);

	/* We need a checksum per page. */
	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
	if (!crc)
		return -1;

	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!pages) {
		kfree(crc);
		return -1;
	}

	/*
	 * Since the first page has all of our checksums and our generation we
	 * need to calculate the offset into the page that we can start writing
	 * our entries.
	 */
	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one.
	 */
	unpin = root->fs_info->pinned_extents;

	/*
	 * Lock all pages first so we can lock the extent safely.
	 *
	 * NOTE: Because we hold the ref the entire time we're going to write to
	 * the page find_get_page should never fail, so we don't do a check
	 * after find_get_page at this point.  Just putting this here so people
	 * know and don't freak out.
	 */
	while (index < num_pages) {
		page = grab_cache_page(inode->i_mapping, index);
		if (!page) {
			int i;

			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			goto out_free;
		}
		pages[index] = page;
		index++;
	}

	index = 0;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	if (block_group)
		start = block_group->key.objectid;

	/* Write out the extent entries */
	do {
		struct btrfs_free_space_entry *entry;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;

		next_page = false;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}

		page = pages[index];
		addr = kmap(page);
		entry = addr + start_offset;

		memset(addr, 0, PAGE_CACHE_SIZE);
		while (node && !next_page) {
			struct btrfs_free_space *e;

			e = rb_entry(node, struct btrfs_free_space, offset_index);
			entries++;

			entry->offset = cpu_to_le64(e->offset);
			entry->bytes = cpu_to_le64(e->bytes);
			if (e->bitmap) {
				entry->type = BTRFS_FREE_SPACE_BITMAP;
				list_add_tail(&e->list, &bitmap_list);
				bitmaps++;
			} else {
				entry->type = BTRFS_FREE_SPACE_EXTENT;
			}
			node = rb_next(node);
			if (!node && cluster) {
				node = rb_first(&cluster->root);
				cluster = NULL;
			}
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}

		/*
		 * We want to add any pinned extents to our free space cache
		 * so we don't leak the space.
		 */
		while (block_group && !next_page &&
		       (start < block_group->key.objectid +
			block_group->key.offset)) {
			ret = find_first_extent_bit(unpin, start, &start, &end,
						    EXTENT_DIRTY);
			if (ret) {
				ret = 0;
				break;
			}

			/* This pinned extent is out of our range */
			if (start >= block_group->key.objectid +
			    block_group->key.offset)
				break;

			len = block_group->key.objectid +
				block_group->key.offset - start;
			len = min(len, end + 1 - start);

			entries++;
			entry->offset = cpu_to_le64(start);
			entry->bytes = cpu_to_le64(len);
			entry->type = BTRFS_FREE_SPACE_EXTENT;

			start = end + 1;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr + start_offset, *crc,
				       PAGE_CACHE_SIZE - start_offset);
		kunmap(page);

		btrfs_csum_final(*crc, (char *)crc);
		crc++;

		bytes += PAGE_CACHE_SIZE;

		index++;
	} while (node || next_page);

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		void *addr;
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}
		page = pages[index];

		addr = kmap(page);
		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE);
		kunmap(page);
		btrfs_csum_final(*crc, (char *)crc);
		crc++;
		bytes += PAGE_CACHE_SIZE;

		list_del_init(&entry->list);
		index++;
	}

	if (out_of_space) {
		btrfs_drop_pages(pages, num_pages);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
				     i_size_read(inode) - 1, &cached_state,
				     GFP_NOFS);
		ret = 0;
		goto out_free;
	}

	/* Zero out the rest of the pages just to make sure */
	while (index < num_pages) {
		void *addr;

		page = pages[index];
		addr = kmap(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		bytes += PAGE_CACHE_SIZE;
		index++;
	}

	/* Write the checksums and trans id to the first page */
	{
		void *addr;
		u64 *gen;

		page = pages[0];
		addr = kmap(page);
		memcpy(addr, checksums, sizeof(u32) * num_pages);
		gen = addr + (sizeof(u32) * num_pages);
		*gen = trans->transid;
		kunmap(page);
	}

	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
				bytes, &cached_state);
	btrfs_drop_pages(pages, num_pages);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret) {
		ret = 0;
		goto out_free;
	}

	BTRFS_I(inode)->generation = trans->transid;

	filemap_write_and_wait(inode->i_mapping);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
	if (ret < 0) {
		ret = -1;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
		goto out_free;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			ret = -1;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 GFP_NOFS);
			btrfs_release_path(root, path);
			goto out_free;
		}
	}
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	ret = 1;

out_free:
	if (ret != 1) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		BTRFS_I(inode)->generation = 0;
	}
	kfree(checksums);
	kfree(pages);
	btrfs_update_inode(trans, root, inode);
	return ret;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret < 0) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}
static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
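/*
 * A worked example for the helpers above, assuming 4k pages and a 4k
 * sectorsize (so ctl->unit == 4096): BITS_PER_BITMAP is 32768, one bitmap
 * spans 32768 * 4096 = 128MiB, and offset_to_bitmap() simply rounds an
 * offset down to the start of the 128MiB window (relative to ctl->start)
 * that contains it.  offset_to_bit() then gives the bit index inside that
 * window, e.g. an offset 1MiB into the window is bit 256.
 */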
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				WARN_ON(info->bitmap);
				p = &(*p)->rb_right;
			} else {
				WARN_ON(!info->bitmap);
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
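/*
 * To make the thresholds concrete: for a 1GiB block group, max_bytes is
 * 32k.  With 4k pages each bitmap costs one page, so with, say, three
 * bitmaps in use the (3 + 1) * 4k = 16k of bitmap memory leaves 16k for
 * extent entries, and extents_thresh becomes 16k divided by
 * sizeof(struct btrfs_free_space).  Once bitmap pages alone would exceed
 * the 32k budget, extents_thresh drops to 0 and use_bitmap() will push
 * everything into bitmaps.  (Numbers assume 4k pages; the sizeof depends
 * on the build.)
 */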
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
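/*
 * find_free_space() below has no header comment in this file, so briefly:
 * it starts from the bitmap window that would contain *offset and walks
 * entries in offset order, skipping anything smaller than *bytes.  For a
 * bitmap entry it defers to search_bitmap() to find an actual run of set
 * bits; for an extent entry the entry itself is the answer.  On success
 * *offset and *bytes describe the space that was found.
 */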
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure it's actually there.  If it's not there then we need to
	 * stop because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	int added = 0;
	u64 bytes, offset, end;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

again:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	if (offset >= bitmap_info->offset && offset + bytes > end) {
		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
		bytes -= end - offset;
		offset = end;
		added = 0;
	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
		bytes = 0;
	} else {
		BUG();
	}

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
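/*
 * Putting the pieces together, the add path below is: try to merge the
 * new range with physically adjacent extent entries; if nothing merged,
 * let insert_into_bitmap() decide (via the use_bitmap op) whether the
 * range should be absorbed into a bitmap; only if that declines do we
 * link a standalone extent entry into the offset tree.
 */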
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;

		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			      " trying to use %llu\n",
			      (unsigned long long)info->offset,
			      (unsigned long long)info->bytes,
			      (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(ctl, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes\n",
	       count);
}
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	spin_lock(&ctl->tree_lock);
	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		unlink_free_space(ctl, info);
		kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	spin_unlock(&ctl->tree_lock);

	__btrfs_remove_free_space_cache(ctl);
}
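/*
 * btrfs_find_space_for_alloc() has no header comment here, so briefly: it
 * carves bytes + empty_size out of the block group's free space tree and
 * returns the logical start of the allocation, or 0 if nothing suitable
 * was found.  Extent entries are trimmed in place (or freed when fully
 * consumed); bitmap entries just get the corresponding bits cleared.
 */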
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it, don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			struct rb_node *node;

			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				struct rb_node *node;
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
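/*
 * btrfs_bitmap_cluster() below tries to build the cluster window out of a
 * single bitmap entry: it scans the bitmap for runs of set bits, growing
 * total_found until it covers min_bytes, and resets the window whenever
 * the bits being accumulated spread out over more than twice the wanted
 * size.  On success the whole bitmap entry migrates from the free space
 * tree into the cluster's private rbtree.
 */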
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	bool found = false;
	int ret;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 */
static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;
	int ret;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap)
			continue;
		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster; we assume
 * that we have already failed to find extents that will work.
 */
static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
	} while (ret && node);

	return ret;
}
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
				      min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, offset,
					   bytes, min_bytes);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
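/*
 * btrfs_trim_block_group() walks [start, end) through the free space
 * tree, temporarily pulls each chunk of at least minlen bytes out of the
 * tree, issues the discard for it, and then re-adds the range,
 * accumulating the bytes actually trimmed into *trimmed.  The
 * reserve/unreserve calls around the discard keep the allocator from
 * handing the range out while the device is still working on it.
 */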
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			int update_ret;
			update_ret = btrfs_update_reserved_bytes(block_group,
								 bytes, 1, 1);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (!update_ret)
				btrfs_update_reserved_bytes(block_group,
							    bytes, 0, 1);

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}