/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
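/*
 * Worked example (assuming 4K pages and a 4K sectorsize): a bitmap entry
 * holds PAGE_CACHE_SIZE * 8 = 32768 bits, one per sector, so a single
 * bitmap describes 32768 * 4096 bytes = 128MB of the block group.
 * MAX_CACHE_BYTES_PER_GIG caps the in-memory footprint of the tracking
 * structures at 32K per gigabyte of block group space; see
 * recalculate_thresholds() below for how that budget is split between
 * extent entries and bitmaps.
 */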
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(root, path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(root, path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	spin_lock(&block_group->lock);
	if (!root->fs_info->closing) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 objectid;
	int ret;

	ret = btrfs_find_free_objectid(root, &objectid);
	if (ret < 0)
		return ret;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
			      BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item,
				    block_group->key.objectid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(root, path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	return 0;
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	loff_t oldsize;
	int ret = 0;

	trans->block_rsv = root->orphan_block_rsv;
	ret = btrfs_block_rsv_check(trans, root,
				    root->orphan_block_rsv,
				    0, 5);
	if (ret)
		return ret;

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		WARN_ON(1);
		return ret;
	}

	return btrfs_update_inode(trans, root, inode);
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
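/*
 * The cache file layout, as implied by the read and write paths below: the
 * first page begins with one u32 crc per page of the file, followed by a
 * u64 generation stamp.  Free space entries start right after that header
 * (first_page_offset) and continue on the following pages; any bitmaps are
 * stored whole, one per page, after the last page of entries.
 */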
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct page *page;
	struct btrfs_path *path;
	u32 *checksums = NULL, *crc;
	char *disk_crcs = NULL;
	struct btrfs_key key;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u64 used = btrfs_block_group_used(&block_group->item);
	u32 cur_crc = ~(u32)0;
	pgoff_t index = 0;
	unsigned long first_page_offset;
	int num_checksums;
	int ret = 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (fs_info->closing)
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	INIT_LIST_HEAD(&bitmaps);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode)) {
		btrfs_free_path(path);
		goto out;
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		btrfs_free_path(path);
		goto out;
	}

	ret = 0;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_free_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu) for "
		       "block group %llu\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation,
		       (unsigned long long)block_group->key.objectid);
		goto free_cache;
	}

	if (!num_entries)
		goto out;

	/* Setup everything for doing checksumming */
	num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
	checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
	if (!checksums)
		goto out;
	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
	disk_crcs = kzalloc(first_page_offset, GFP_NOFS);
	if (!disk_crcs)
		goto out;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_free_space_entry *entry;
		struct btrfs_free_space *e;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;
		int need_loop = 0;

		if (!num_entries && !num_bitmaps)
			break;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		page = grab_cache_page(inode->i_mapping, index);
		if (!page)
			goto free_cache;

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache: %llu\n",
				       (unsigned long long)
				       block_group->key.objectid);
				goto free_cache;
			}
		}
		addr = kmap(page);

		if (index == 0) {
			u64 *gen;

			memcpy(disk_crcs, addr, first_page_offset);
			gen = addr + (sizeof(u32) * num_checksums);
			if (*gen != BTRFS_I(inode)->generation) {
				printk(KERN_ERR "btrfs: space cache generation"
				       " (%llu) does not match inode (%llu) "
				       "for block group %llu\n",
				       (unsigned long long)*gen,
				       (unsigned long long)
				       BTRFS_I(inode)->generation,
				       (unsigned long long)
				       block_group->key.objectid);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}
			crc = (u32 *)disk_crcs;
		}
		entry = addr + start_offset;

		/* First lets check our crc before we do anything fun */
		cur_crc = ~(u32)0;
		cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc,
					  PAGE_CACHE_SIZE - start_offset);
		btrfs_csum_final(cur_crc, (char *)&cur_crc);
		if (cur_crc != *crc) {
			printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
			       "block group %llu\n", index,
			       (unsigned long long)block_group->key.objectid);
			kunmap(page);
			unlock_page(page);
			page_cache_release(page);
			goto free_cache;
		}
		crc++;

		while (1) {
			if (!num_entries)
				break;

			need_loop = 1;
			e = kmem_cache_zalloc(btrfs_free_space_cachep,
					      GFP_NOFS);
			if (!e) {
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			e->offset = le64_to_cpu(entry->offset);
			e->bytes = le64_to_cpu(entry->bytes);
			if (!e->bytes) {
				kunmap(page);
				kmem_cache_free(btrfs_free_space_cachep, e);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				spin_unlock(&ctl->tree_lock);
			} else {
				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
				if (!e->bitmap) {
					kunmap(page);
					kmem_cache_free(
						btrfs_free_space_cachep, e);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				ctl->total_bitmaps++;
				ctl->op->recalc_thresholds(ctl);
				spin_unlock(&ctl->tree_lock);
				list_add_tail(&e->list, &bitmaps);
			}

			num_entries--;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				break;
			entry++;
		}

		/*
		 * We read an entry out of this page, we need to move on to the
		 * next page.
		 */
		if (need_loop) {
			kunmap(page);
			goto next;
		}

		/*
		 * We add the bitmaps at the end of the entries in order that
		 * the bitmap entries are added to the cache.
		 */
		e = list_entry(bitmaps.next, struct btrfs_free_space, list);
		list_del_init(&e->list);
		memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
		kunmap(page);
		num_bitmaps--;
next:
		unlock_page(page);
		page_cache_release(page);
		index++;
	}

	spin_lock(&ctl->tree_lock);
	if (ctl->free_space != (block_group->key.offset - used -
				block_group->bytes_super)) {
		spin_unlock(&ctl->tree_lock);
		printk(KERN_ERR "block group %llu has the wrong amount of free "
		       "space\n",
		       (unsigned long long)block_group->key.objectid);
		ret = 0;
		goto free_cache;
	}
	spin_unlock(&ctl->tree_lock);

	ret = 1;
out:
	kfree(checksums);
	kfree(disk_crcs);
	iput(inode);
	return ret;

free_cache:
	/* This cache is bogus, make sure it gets cleared */
	spin_lock(&block_group->lock);
	block_group->disk_cache_state = BTRFS_DC_CLEAR;
	spin_unlock(&block_group->lock);
	btrfs_remove_free_space_cache(block_group);
	goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct page **pages;
	struct page *page;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	u64 bytes = 0;
	u32 *crc, *checksums;
	unsigned long first_page_offset;
	int index = 0, num_pages = 0;
	int entries = 0;
	int bitmaps = 0;
	int ret = -1;
	bool next_page = false;
	bool out_of_space = false;

	root = root->fs_info->tree_root;

	INIT_LIST_HEAD(&bitmap_list);

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	if (!i_size_read(inode)) {
		iput(inode);
		return 0;
	}

	node = rb_first(&ctl->free_space_offset);
	if (!node) {
		iput(inode);
		return 0;
	}

	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	filemap_write_and_wait(inode->i_mapping);
	btrfs_wait_ordered_range(inode, inode->i_size &
				 ~(root->sectorsize - 1), (u64)-1);

	/* We need a checksum per page. */
	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
	if (!crc) {
		iput(inode);
		return 0;
	}

	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!pages) {
		kfree(crc);
		iput(inode);
		return 0;
	}

	/* Since the first page has all of our checksums and our generation we
	 * need to calculate the offset into the page that we can start writing
	 * our entries.
	 */
	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);

	/* Get the cluster for this block_group if it exists */
	if (!list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	/*
	 * Lock all pages first so we can lock the extent safely.
	 *
	 * NOTE: Because we hold the ref the entire time we're going to write to
	 * the page find_get_page should never fail, so we don't do a check
	 * after find_get_page at this point.  Just putting this here so people
	 * know and don't freak out.
	 */
	while (index < num_pages) {
		page = grab_cache_page(inode->i_mapping, index);
		if (!page) {
			int i;

			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			goto out_free;
		}
		pages[index] = page;
		index++;
	}

	index = 0;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	start = block_group->key.objectid;

	/* Write out the extent entries */
	do {
		struct btrfs_free_space_entry *entry;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;

		next_page = false;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}

		page = pages[index];

		addr = kmap(page);
		entry = addr + start_offset;

		memset(addr, 0, PAGE_CACHE_SIZE);
		while (node && !next_page) {
			struct btrfs_free_space *e;

			e = rb_entry(node, struct btrfs_free_space, offset_index);
			entries++;

			entry->offset = cpu_to_le64(e->offset);
			entry->bytes = cpu_to_le64(e->bytes);
			if (e->bitmap) {
				entry->type = BTRFS_FREE_SPACE_BITMAP;
				list_add_tail(&e->list, &bitmap_list);
				bitmaps++;
			} else {
				entry->type = BTRFS_FREE_SPACE_EXTENT;
			}
			node = rb_next(node);
			if (!node && cluster) {
				node = rb_first(&cluster->root);
				cluster = NULL;
			}
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}

		/*
		 * We want to add any pinned extents to our free space cache
		 * so we don't leak the space
		 */
		while (!next_page && (start < block_group->key.objectid +
				      block_group->key.offset)) {
			ret = find_first_extent_bit(unpin, start, &start, &end,
						    EXTENT_DIRTY);
			if (ret) {
				ret = 0;
				break;
			}

			/* This pinned extent is out of our range */
			if (start >= block_group->key.objectid +
			    block_group->key.offset)
				break;

			len = block_group->key.objectid +
				block_group->key.offset - start;
			len = min(len, end + 1 - start);

			entries++;
			entry->offset = cpu_to_le64(start);
			entry->bytes = cpu_to_le64(len);
			entry->type = BTRFS_FREE_SPACE_EXTENT;

			start = end + 1;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr + start_offset, *crc,
				       PAGE_CACHE_SIZE - start_offset);
		kunmap(page);

		btrfs_csum_final(*crc, (char *)crc);
		crc++;

		bytes += PAGE_CACHE_SIZE;

		index++;
	} while (node || next_page);

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		void *addr;
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}
		page = pages[index];

		addr = kmap(page);
		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE);
		kunmap(page);
		btrfs_csum_final(*crc, (char *)crc);
		crc++;
		bytes += PAGE_CACHE_SIZE;

		list_del_init(&entry->list);
		index++;
	}

	if (out_of_space) {
		btrfs_drop_pages(pages, num_pages);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
				     i_size_read(inode) - 1, &cached_state,
				     GFP_NOFS);
		ret = 0;
		goto out_free;
	}

	/* Zero out the rest of the pages just to make sure */
	while (index < num_pages) {
		void *addr;

		page = pages[index];
		addr = kmap(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		bytes += PAGE_CACHE_SIZE;
		index++;
	}

	/* Write the checksums and trans id to the first page */
	{
		void *addr;
		u64 *gen;

		page = pages[0];

		addr = kmap(page);
		memcpy(addr, checksums, sizeof(u32) * num_pages);
		gen = addr + (sizeof(u32) * num_pages);
		*gen = trans->transid;
		kunmap(page);
	}

	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
				bytes, &cached_state);
	btrfs_drop_pages(pages, num_pages);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret) {
		ret = 0;
		goto out_free;
	}

	BTRFS_I(inode)->generation = trans->transid;

	filemap_write_and_wait(inode->i_mapping);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
	if (ret < 0) {
		ret = 0;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
		goto out_free;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != block_group->key.objectid) {
			ret = 0;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 GFP_NOFS);
			btrfs_release_path(root, path);
			goto out_free;
		}
	}
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	ret = 1;

out_free:
	if (ret != 1) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		BTRFS_I(inode)->generation = 0;
	}
	kfree(checksums);
	kfree(pages);
	btrfs_update_inode(trans, root, inode);
	iput(inode);
	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}
static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
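/*
 * Worked example: with ctl->unit == 4096 and 4K pages, bytes_per_bitmap is
 * 32768 * 4096 = 128MB.  For a block group starting at ctl->start == 1GB,
 * an offset of 1GB + 200MB rounds down to bitmap_start == 1GB + 128MB, the
 * base of the bitmap that covers it; offset_to_bit() of that same offset
 * within the bitmap is then (200MB - 128MB) / 4096 = 18432.
 */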
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				WARN_ON(info->bitmap);
				p = &(*p)->rb_right;
			} else {
				WARN_ON(!info->bitmap);
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
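/*
 * Worked example: for a 1GB block group max_bytes is 32768.  With no
 * bitmaps allocated yet and 4K pages, bitmap_bytes is (0 + 1) * 4096, so
 * extent_bytes is min(32768 - 4096, 32768 / 2) = 16384, and the extent
 * threshold comes out to 16384 / sizeof(struct btrfs_free_space) entries,
 * on the order of a few hundred given the size of that struct.
 */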
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	ctl->free_space -= bytes;
}
static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
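/*
 * search_bitmap() is a first-fit scan: starting at *offset (clamped to the
 * bitmap's base), it returns in *offset/*bytes the first run of set bits at
 * least *bytes long, reporting the full length of that run.  E.g. with a
 * 4K unit, asking for 8K (2 bits) in a bitmap whose set runs are
 * [1 bit][gap][3 bits] skips the 1-bit run and reports the 3-bit (12K) run.
 */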
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case somethings not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure its actually there.  If its not there then we need to stop
	 * because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	int added = 0;
	u64 bytes, offset, end;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

again:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	if (offset >= bitmap_info->offset && offset + bytes > end) {
		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
		bytes -= end - offset;
		offset = end;
		added = 0;
	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
		bytes = 0;
	} else {
		BUG();
	}

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
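/*
 * Merging example: freeing [8K, 12K) when the tree already holds extent
 * entries [4K, 8K) and [12K, 16K) unlinks both neighbours and grows the new
 * entry into a single [4K, 16K) extent before it is linked in.  Bitmap
 * entries are never merged this way; only plain extent entries are.
 */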
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
					     struct btrfs_free_space,
					     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			      " trying to use %llu\n",
			      (unsigned long long)info->offset,
			      (unsigned long long)info->bytes,
			      (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(ctl, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
	       "\n", count);
}
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
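/*
 * That is, the initial threshold is 16K worth of extent entries; assuming
 * struct btrfs_free_space is a few dozen bytes on a 64-bit build, this
 * allows on the order of a few hundred extent entries per block group
 * before new free space starts being folded into bitmaps instead.
 */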
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	spin_lock(&ctl->tree_lock);
	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		unlink_free_space(ctl, info);
		kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	spin_unlock(&ctl->tree_lock);

	__btrfs_remove_free_space_cache(ctl);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			struct rb_node *node;

			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				struct rb_node *node;
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 */
static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap)
			continue;
		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (!entry->bitmap)
			continue;
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
	} while (ret && node);

	return ret;
}
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocates with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
				      min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, offset,
					   bytes, min_bytes);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
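/*
 * Example of the min_bytes policy above: for bytes == 64K and
 * empty_size == 1M (a 1088K window), ssd_spread demands the whole 1088K up
 * front; a metadata block group settles for max(64K, 1088K >> 4) = 68K
 * normally, or max(64K, 1088K >> 1) = 544K while delayed refs are flushing;
 * a data block group needs max(64K, 1088K >> 2) = 272K.
 */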
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			int update_ret;
			update_ret = btrfs_update_reserved_bytes(block_group,
								 bytes, 1, 1);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (!update_ret)
				btrfs_update_reserved_bytes(block_group,
							    bytes, 0, 1);

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; should be empty if it can't find anything */
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}